path | concatenated_notebook
---|---|
Real_Data/Cusanovich_2018_subset/extra_clustering/scABC/scABC_cusanovich2018subset.ipynb | ###Markdown
Import packages
###Code
options(stringsAsFactors = FALSE)
library(GenomicRanges)
library(scABC)
library(Rsamtools)
library(data.table)
library(dplyr)
library(tidyverse)
library(Matrix)
library(gplots)
library(RColorBrewer)
library(devtools)
source_url("https://raw.githubusercontent.com/obigriffith/biostar-tutorials/master/Heatmaps/heatmap.3.R")
###Output
Loading required package: stats4
Loading required package: BiocGenerics
Loading required package: parallel
Attaching package: ‘BiocGenerics’
The following objects are masked from ‘package:parallel’:
clusterApply, clusterApplyLB, clusterCall, clusterEvalQ,
clusterExport, clusterMap, parApply, parCapply, parLapply,
parLapplyLB, parRapply, parSapply, parSapplyLB
The following objects are masked from ‘package:stats’:
IQR, mad, sd, var, xtabs
The following objects are masked from ‘package:base’:
anyDuplicated, append, as.data.frame, basename, cbind, colMeans,
colnames, colSums, dirname, do.call, duplicated, eval, evalq,
Filter, Find, get, grep, grepl, intersect, is.unsorted, lapply,
lengths, Map, mapply, match, mget, order, paste, pmax, pmax.int,
pmin, pmin.int, Position, rank, rbind, Reduce, rowMeans, rownames,
rowSums, sapply, setdiff, sort, table, tapply, union, unique,
unsplit, which, which.max, which.min
Loading required package: S4Vectors
Attaching package: ‘S4Vectors’
The following object is masked from ‘package:base’:
expand.grid
Loading required package: IRanges
Loading required package: GenomeInfoDb
Warning message:
“replacing previous import ‘IRanges::which’ by ‘Matrix::which’ when loading ‘scABC’”
Loading required package: Biostrings
Loading required package: XVector
Attaching package: ‘Biostrings’
The following object is masked from ‘package:base’:
strsplit
Attaching package: ‘data.table’
The following object is masked from ‘package:GenomicRanges’:
shift
The following object is masked from ‘package:IRanges’:
shift
The following objects are masked from ‘package:S4Vectors’:
first, second
Attaching package: ‘dplyr’
The following objects are masked from ‘package:data.table’:
between, first, last
The following objects are masked from ‘package:Biostrings’:
collapse, intersect, setdiff, setequal, union
The following object is masked from ‘package:XVector’:
slice
The following objects are masked from ‘package:GenomicRanges’:
intersect, setdiff, union
The following object is masked from ‘package:GenomeInfoDb’:
intersect
The following objects are masked from ‘package:IRanges’:
collapse, desc, intersect, setdiff, slice, union
The following objects are masked from ‘package:S4Vectors’:
first, intersect, rename, setdiff, setequal, union
The following objects are masked from ‘package:BiocGenerics’:
combine, intersect, setdiff, union
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
── Attaching packages ─────────────────────────────────────── tidyverse 1.2.1 ──
✔ ggplot2 3.1.0 ✔ readr 1.3.1
✔ tibble 2.1.1 ✔ purrr 0.3.2
✔ tidyr 0.8.3 ✔ stringr 1.4.0
✔ ggplot2 3.1.0 ✔ forcats 0.4.0
── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::between() masks data.table::between()
✖ dplyr::collapse() masks Biostrings::collapse(), IRanges::collapse()
✖ dplyr::combine() masks BiocGenerics::combine()
✖ purrr::compact() masks XVector::compact()
✖ dplyr::desc() masks IRanges::desc()
✖ tidyr::expand() masks S4Vectors::expand()
✖ dplyr::filter() masks stats::filter()
✖ dplyr::first() masks data.table::first(), S4Vectors::first()
✖ dplyr::lag() masks stats::lag()
✖ dplyr::last() masks data.table::last()
✖ ggplot2::Position() masks BiocGenerics::Position(), base::Position()
✖ purrr::reduce() masks GenomicRanges::reduce(), IRanges::reduce()
✖ dplyr::rename() masks S4Vectors::rename()
✖ dplyr::slice() masks XVector::slice(), IRanges::slice()
✖ purrr::transpose() masks data.table::transpose()
Attaching package: ‘Matrix’
The following object is masked from ‘package:tidyr’:
expand
The following object is masked from ‘package:S4Vectors’:
expand
Attaching package: ‘gplots’
The following object is masked from ‘package:IRanges’:
space
The following object is masked from ‘package:S4Vectors’:
space
The following object is masked from ‘package:stats’:
lowess
SHA-1 hash of file is 015fc0457e61e3e93a903e69a24d96d2dac7b9fb
###Markdown
Load Data
###Code
load(file = '../../run_methods/scABC/scABC_cusanovich2018subset.RData')
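# The saved workspace is assumed to provide the objects used below, e.g.
# InSilicoCell2LandmarkCorrelation_sorted, InSilicoLandMarkAssignments_sorted,
# metadata, cell.labels, and the colour objects scalered, rowcols, rcols1 and rcols2.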
heatmap.3(InSilicoCell2LandmarkCorrelation_sorted, dendrogram='none', Rowv=FALSE, Colv=FALSE,
          trace='none', col = scalered, margin = c(5, 5), density.info = "none",
          RowSideColors = rowcols, RowSideColorsSize = 2, symm = F, symkey = F,
          symbreaks = F, scale = "none")
legend("bottomleft", legend = c(unique(cell.labels), paste0("cluster ", 1:length(unique(metadata$label)))),
col = c(rcols1, rcols2), border=FALSE, bty="n", y.intersp = 0.7, cex=0.7, pch = 15)
tail(InSilicoLandMarkAssignments_sorted)
result = data.frame('scABC'=InSilicoLandMarkAssignments_sorted)
rownames(result) = sapply(strsplit(basename(rownames(result)),'\\.'),'[', 2)
df_pre = result
df_pre$ord = 1:nrow(df_pre)
df_pre = df_pre[as.character(rownames(metadata)),]
df_out = data.frame('scABC'=df_pre[,'scABC'])
rownames(df_out) = rownames(df_pre)
write.table(df_out, file = "clusteringSolution.tsv", quote = FALSE, sep = '\t', col.names = NA)
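# Optional sanity check (a minimal sketch): cross-tabulate the scABC cluster
# assignments against the reference labels stored in `metadata$label`.
# table(df_out$scABC, metadata[rownames(df_out), 'label'])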
###Output
_____no_output_____ |
mobileapp/voice-emotion-detection/mini-proj-example-code.ipynb | ###Markdown
Code from [here](https://data-flair.training/blogs/python-mini-project-speech-emotion-recognition/)
###Code
# Import the necessary Modules
import librosa
import soundfile
import os, glob, pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
#DataFlair - Extract features (mfcc, chroma, mel) from a sound file
def extract_feature(file_name, mfcc, chroma, mel):
with soundfile.SoundFile(file_name) as sound_file:
X = sound_file.read(dtype="float32")
sample_rate=sound_file.samplerate
if chroma:
stft=np.abs(librosa.stft(X))
result=np.array([])
if mfcc:
mfccs=np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result=np.hstack((result, mfccs))
if chroma:
chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
result=np.hstack((result, chroma))
if mel:
mel=np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)
result=np.hstack((result, mel))
return result
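# A quick way to sanity-check extract_feature on a single file (hypothetical path);
# with all three feature groups enabled the vector has 40 MFCC + 12 chroma + 128 mel = 180 values.
# example = extract_feature("ravdess-dataset/Actor_01/03-01-01-01-01-01-01.wav", mfcc=True, chroma=True, mel=True)
# print(example.shape)  # expected: (180,)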
#DataFlair - Emotions in the RAVDESS dataset
emotions={
'01':'neutral',
'02':'calm',
'03':'happy',
'04':'sad',
'05':'angry',
'06':'fearful',
'07':'disgust',
'08':'surprised'
}
#DataFlair - Emotions to observe
observed_emotions=['calm', 'happy', 'fearful', 'disgust']
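# The two-digit codes above correspond to the third dash-separated field of RAVDESS
# file names, which is how load_data() below recovers each clip's emotion label;
# clips whose emotion is not in observed_emotions are skipped.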
#DataFlair - Load the data and extract features for each sound file
def load_data(test_size=0.2):
x,y=[],[]
for file in glob.glob("ravdess-dataset\\Actor_*\\*.wav"):
file_name=os.path.basename(file)
emotion=emotions[file_name.split("-")[2]]
if emotion not in observed_emotions:
continue
feature=extract_feature(file, mfcc=True, chroma=True, mel=True)
x.append(feature)
y.append(emotion)
return train_test_split(np.array(x), y, test_size=test_size, random_state=9)
#DataFlair - Split the dataset
x_train,x_test,y_train,y_test=load_data(test_size=0.25)
#DataFlair - Get the shape of the training and testing datasets
print((x_train.shape[0], x_test.shape[0]))
#DataFlair - Get the number of features extracted
print(f'Features extracted: {x_train.shape[1]}')
#DataFlair - Initialize the Multi Layer Perceptron Classifier
model=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,), learning_rate='adaptive', max_iter=1000)
#DataFlair - Train the model
model.fit(x_train,y_train)
#DataFlair - Predict for the test set
y_pred=model.predict(x_test)
#DataFlair - Calculate the accuracy of our model
accuracy=accuracy_score(y_true=y_test, y_pred=y_pred)
#DataFlair - Print the accuracy
print("Accuracy: {:.2f}%".format(accuracy*100))
###Output
Accuracy: 68.23%
|
SeedResults.ipynb | ###Markdown
Let's get the data for the mobile net comparison
###Code
import pickle
import numpy as np
import os
META_RESULT_MATRIX = []
for file_names in os.listdir("performances/"):
if not file_names.endswith(".txt"): continue
with open(f"performances/{file_names}") as f:
row = []
for line in f.readlines():
row.append(float(line[line.index(':')+1:].strip()))
META_RESULT_MATRIX.append(row)
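# Each performance file holds one fold's metrics; based on the file naming ("hundred seeds")
# and the grouping below, the rows appear to be 100 seeds x 5 folds, with column 8 the AUROC.
# Averaging each group of five therefore yields one mean AUROC per seed.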
AVERAGE_AUROCS = np.array(META_RESULT_MATRIX)[:, 8]
META_RESULT_MATRIX = np.array(META_RESULT_MATRIX)[:, :8]
AVERAGE_AUROCS = np.array([np.mean(five_aurocs) for five_aurocs in np.array_split(AVERAGE_AUROCS, 100)])
# last thing: get the average tprs
TPRS = []
for file_names in os.listdir("tpr_files/"):
if not file_names.endswith(".pickle"): continue
with open(f"tpr_files/" + file_names, "rb") as f :
TPRS.append(pickle.load(f))
TPRS = np.array(TPRS)
TPRS = np.array([np.mean(five_tprs, axis = 0) for five_tprs in np.array_split(TPRS, 100)])
with open("mobile_net_hundred_seeds_results.pickle", 'wb') as f:
pickle.dump((META_RESULT_MATRIX, AVERAGE_AUROCS, TPRS), f)
###Output
_____no_output_____
###Markdown
Let's run p-value tests on all (validation) results for all metrics
###Code
import pickle, numpy as np, os
with open("mean_landmark_hundred_seeds_results", 'rb') as f:
(MEAN_META_RESULT_MATRIX, AVERAGE_AUROCS, AVERAGE_TPRS) = pickle.load(f)
with open("one_landmark_hundred_seeds_results", 'rb') as f:
(ONE_META_RESULT_MATRIX, AVERAGE_AUROCS, AVERAGE_TPRS) = pickle.load(f)
with open("all_landmarks_hundred_seeds_results", 'rb') as f:
(ALL_META_RESULT_MATRIX, AVERAGE_AUROCS, AVERAGE_TPRS) = pickle.load(f)
with open("six_landmarks_hundred_seeds_results", 'rb') as f:
(SIX_META_RESULT_MATRIX, AVERAGE_AUROCS, AVERAGE_TPRS) = pickle.load(f)
with open("mobile_net_hundred_seeds_results.pickle", 'rb') as f:
(MB_META_RESULT_MATRIX, AVERAGE_AUROCS, TPRS) = pickle.load(f)
np.std(AVERAGE_AUROCS, axis=0)
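# boil() collapses the per-fold rows into one mean row per seed (groups of five),
# mirroring the averaging already applied to the AUROCs above.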
def boil(matrix):
matrix = np.array_split(matrix, 100)
return np.array([np.mean(fold_results, axis = 0) for fold_results in matrix])
MB_META_RESULT_MATRIX, MEAN_META_RESULT_MATRIX, ONE_META_RESULT_MATRIX, ALL_META_RESULT_MATRIX, SIX_META_RESULT_MATRIX = [np.array(mat) for mat in [MB_META_RESULT_MATRIX, MEAN_META_RESULT_MATRIX, ONE_META_RESULT_MATRIX, ALL_META_RESULT_MATRIX, SIX_META_RESULT_MATRIX]]
MB_META_RESULT_MATRIX, MEAN_META_RESULT_MATRIX, ONE_META_RESULT_MATRIX, ALL_META_RESULT_MATRIX, SIX_META_RESULT_MATRIX = list(map(boil, [MB_META_RESULT_MATRIX, MEAN_META_RESULT_MATRIX, ONE_META_RESULT_MATRIX, ALL_META_RESULT_MATRIX, SIX_META_RESULT_MATRIX]))
approaches = ['mean', 'one', 'all', 'six', 'mobile net']
metrics = list(map(lambda x : "validation " + x, ["accuracy", "precision", "recall", "F1"]))
approach_matrix = {approach : matrix for approach, matrix in zip(approaches, [MEAN_META_RESULT_MATRIX, ONE_META_RESULT_MATRIX, ALL_META_RESULT_MATRIX, SIX_META_RESULT_MATRIX, MB_META_RESULT_MATRIX])}
metrics
from scipy.stats import ttest_ind
for i in range(5):
for j in range(i + 1, 5):
print(f"{approaches[i]} vs {approaches[j]}")
for k in range(4): # only 4 metrics
p_value = ttest_ind(approach_matrix[approaches[i]][:, 4 + k], approach_matrix[approaches[j]][:, 4 + k]).pvalue
print(f"for {metrics[k]} : {p_value}, statistically significant : {p_value <= 0.005}")
print("\n")
###Output
mean vs one
for validation accuracy : 0.6677298768921249, statistically significant : False
for validation precision : 0.8498462215261376, statistically significant : False
for validation recall : 0.41682879441214116, statistically significant : False
for validation F1 : 0.4992904697063305, statistically significant : False
mean vs all
for validation accuracy : 5.538265534222322e-06, statistically significant : True
for validation precision : 1.650578472857774e-05, statistically significant : True
for validation recall : 0.15397328926548498, statistically significant : False
for validation F1 : 0.0015635287494645547, statistically significant : True
mean vs six
for validation accuracy : 1.3820978126569102e-12, statistically significant : True
for validation precision : 4.414133943807913e-09, statistically significant : True
for validation recall : 0.5911320307512684, statistically significant : False
for validation F1 : 3.4204992548060963e-07, statistically significant : True
mean vs mobile net
for validation accuracy : 9.108216215146437e-87, statistically significant : True
for validation precision : 1.5796082666571625e-67, statistically significant : True
for validation recall : 9.812296442250122e-25, statistically significant : True
for validation F1 : 2.3217124924065738e-64, statistically significant : True
one vs all
for validation accuracy : 2.3364389601819195e-05, statistically significant : True
for validation precision : 7.797161703050459e-06, statistically significant : True
for validation recall : 0.009891807294584729, statistically significant : False
for validation F1 : 0.014722388841188812, statistically significant : False
one vs six
for validation accuracy : 4.998551068561382e-12, statistically significant : True
for validation precision : 1.970241660876049e-09, statistically significant : True
for validation recall : 0.6565656358715914, statistically significant : False
for validation F1 : 6.938870923445502e-06, statistically significant : True
one vs mobile net
for validation accuracy : 1.8157288468757928e-88, statistically significant : True
for validation precision : 3.315085134437714e-67, statistically significant : True
for validation recall : 2.0555883117005346e-24, statistically significant : True
for validation F1 : 1.3195881820173389e-64, statistically significant : True
all vs six
for validation accuracy : 6.90104230681058e-05, statistically significant : True
for validation precision : 0.006843791169350617, statistically significant : False
for validation recall : 0.004258697174221717, statistically significant : True
for validation F1 : 0.0011347005570090155, statistically significant : True
all vs mobile net
for validation accuracy : 1.2404756019850238e-98, statistically significant : True
for validation precision : 6.904486373855561e-85, statistically significant : True
for validation recall : 5.325705098317206e-47, statistically significant : True
for validation F1 : 6.430008176662873e-86, statistically significant : True
six vs mobile net
for validation accuracy : 7.71673318373441e-91, statistically significant : True
for validation precision : 9.799536709781846e-81, statistically significant : True
for validation recall : 1.071979099435492e-36, statistically significant : True
for validation F1 : 2.3802796871017646e-76, statistically significant : True
|
docs/notebooks/tutorial/post_estimation.ipynb | ###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` which are available after estimation, including:

1. Calculating elasticities and diversion ratios.
2. Calculating marginal costs and markups.
3. Computing the effects of mergers: prices, shares, and HHI.
4. Using a parametric bootstrap to estimate standard errors.
5. Estimating optimal instruments.

Problem Results

As in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm we have correctly configured the :class:`Problem`.
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :class:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
results = problem.solve(
initial_sigma,
initial_pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods.

Elasticities and Diversion Ratios

We can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. As a reminder, elasticities in each market are

$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$

Diversion ratios are

$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$

Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own price elasticities are large and negative while cross-price elasticities are positive but much smaller.

Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.

The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than the average elasticity of individual products, mean own elasticities are generally larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and Markups

To compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
Mergers

Before computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
###Markdown
Bootstrapping Results

Post-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:

1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.
2. Initialize and solve a :class:`Problem` for each bootstrap sample.
3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and, from the resulting empirical distribution, construct bootstrap confidence intervals.

Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitively expensive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and if it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.

A more reasonable alternative is a parametric bootstrap procedure:

1. Construct a large number of draws from the estimated joint distribution of parameters.
2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.
3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.

Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources and is simple enough to not require human oversight of each bootstrap iteration. The primary complication to this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.

An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.

We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll only use a few for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[10, 90],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': aggregates.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal Instruments

Given a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.

The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` which are available after estimation including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm we have correctly configured the :class:`Problem`
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :meth:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
bfgs = pyblp.Optimization('bfgs')
results = problem.solve(
initial_sigma,
initial_pi,
optimization=bfgs,
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than the average elasticity of individual products, mean own elasticities are generally larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and from the resulting empirical distribution, construct boostrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibatively expensive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and if it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also computed the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct boostrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources, and is simple enough to not require human oversight of each bootstrap iteration. The primary complication to this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll only use a few for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[10, 90],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': aggregates.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs'),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` which are available after estimation including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm we have correctly configured the :class:`Problem`
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :meth:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
bfgs = pyblp.Optimization('bfgs')
results = problem.solve(
initial_sigma,
initial_pi,
optimization=bfgs,
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than the average elasticity of individual products, mean own elasticities are generally larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and from the resulting empirical distribution, construct boostrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibatively expensive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and if it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also computed the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct boostrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources, and is simple enough to not require human oversight of each bootstrap iteration. The primary complication to this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll only use a few for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[10, 90],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': aggregates.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs'),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` which are available after estimation including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm we have correctly configured the :class:`Problem`
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :class:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
results = problem.solve(
initial_sigma,
initial_pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
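###Markdown
As a quick sanity check (not part of the original tutorial), the diversion ratios in a market should account for all substitution: with the diversion to the outside good reported on the diagonal, each row of the matrix should sum to approximately one. This sketch assumes the matrix is oriented with $j$ indexing rows, as in the formula above.
###Code
# Hedged sketch: rows of the single-market diversion ratio matrix should sum
# to roughly one, since substitution to all other goods plus the outside good
# exhausts the demand lost by product j.
diversions[single_market].sum(axis=1)
###Output
_____no_output_____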
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own-price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than demand for individual products, mean own elasticities tend to be larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
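###Markdown
To put numbers on this comparison (a small sketch, not part of the original tutorial), we can tabulate summary statistics of the two measures across markets.
###Code
# Hedged sketch: summary statistics of mean own elasticities and aggregate
# elasticities across markets.
pd.DataFrame({
    'Mean Own Elasticities': means.flatten(),
    'Aggregate Elasticities': aggregates.flatten()
}).describe()
###Output
_____no_output_____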
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` that are available after estimation, including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm that we have correctly configured the :class:`Problem`.
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :class:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
results = problem.solve(
initial_sigma,
initial_pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own-price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than demand for individual products, mean own elasticities tend to be larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
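###Markdown
As a compact summary (a sketch we add here, not part of the original tutorial), the average estimated merger effects can be collected in a single table. Note that HHI and consumer surplus are market-level arrays while markups and profits are product-level, so the means are taken over different units.
###Code
# Hedged sketch: mean estimated changes from the merger for each
# post-estimation output computed above.
pd.DataFrame(index=['HHI', 'Markups', 'Profits', 'Consumer Surplus'], data={
    'Mean Change': [
        (changed_hhi - hhi).mean(),
        (changed_markups - markups).mean(),
        (changed_profits - profits).mean(),
        (changed_cs - cs).mean(),
    ]
})
###Output
_____no_output_____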
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and, from the resulting empirical distribution, construct bootstrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and whether it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources and is simple enough not to require human oversight of each bootstrap iteration. The primary complication of this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed-point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll use only 100 for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[5, 95],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': means.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` that are available after estimation, including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm that we have correctly configured the :class:`Problem`.
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :class:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
results = problem.solve(
initial_sigma,
initial_pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own-price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than demand for individual products, mean own elasticities tend to be larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
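###Markdown
As a small sketch (not part of the original tutorial), we can count how many products change ownership under the hypothetical merger, i.e. how many rows previously belonged to firm ``2``.
###Code
# Hedged sketch: number of products whose firm ID changes under the merger.
(product_data['merger_ids'] != product_data['firm_ids']).sum()
###Output
_____no_output_____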
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
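###Markdown
To quantify "generally" (a sketch we add, not part of the original tutorial), we can compute the share of products whose markups and profits rise after the merger.
###Code
# Hedged sketch: fraction of products with higher markups and higher profits
# under the merger.
float((changed_markups > markups).mean()), float((changed_profits > profits).mean())
###Output
_____no_output_____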
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and, from the resulting empirical distribution, construct bootstrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and whether it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources and is simple enough not to require human oversight of each bootstrap iteration. The primary complication of this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed-point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll use only 100 for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[5, 95],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': means.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` that are available after estimation, including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm that we have correctly configured the :class:`Problem`.
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :class:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
bfgs = pyblp.Optimization('bfgs')
results = problem.solve(
initial_sigma,
initial_pi,
optimization=bfgs,
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own-price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than demand for individual products, mean own elasticities tend to be larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and, from the resulting empirical distribution, construct bootstrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and whether it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources and is simple enough not to require human oversight of each bootstrap iteration. The primary complication of this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed-point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll use only 100 for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[5, 95],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': means.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs'),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
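###Markdown
Consumer surplus changes are easier to interpret in relative terms. As a sketch (not part of the original tutorial), we can look at the distribution of percentage changes across markets.
###Code
# Hedged sketch: percentage change in consumer surplus by market.
plt.hist(100 * (changed_cs / cs - 1), bins=50);
plt.legend(["% Change in Consumer Surplus"]);
###Output
_____no_output_____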
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and, from the resulting empirical distribution, construct bootstrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and whether it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources and is simple enough not to require human oversight of each bootstrap iteration. The primary complication of this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed-point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll use only 100 for the sake of speed in this example.
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[5, 95],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': means.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs', {'gtol': 1e-5}),
method='1s'
)
updated_results
###Output
_____no_output_____
###Markdown
Post-Estimation Tutorial
###Code
%matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
###Output
_____no_output_____
###Markdown
This tutorial covers several features of `pyblp` that are available after estimation, including:1. Calculating elasticities and diversion ratios.2. Calculating marginal costs and markups.3. Computing the effects of mergers: prices, shares, and HHI.4. Using a parametric bootstrap to estimate standard errors.5. Estimating optimal instruments. Problem ResultsAs in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm that we have correctly configured the :class:`Problem`.
###Code
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
###Output
_____no_output_____
###Markdown
We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :class:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
###Code
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
bfgs = pyblp.Optimization('bfgs')
results = problem.solve(
initial_sigma,
initial_pi,
optimization=bfgs,
method='1s'
)
results
###Output
_____no_output_____
###Markdown
Additional post-estimation outputs can be computed with :class:`ProblemResults` methods. Elasticities and Diversion RatiosWe can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.As a reminder, elasticities in each market are$$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$Diversion ratios are$$\mathscr{D}_{jk} = -\frac{\partial s_k / \partial x_j}{\partial s_j / \partial x_j}.$$Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
###Code
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
###Output
_____no_output_____
###Markdown
Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
###Code
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
###Output
_____no_output_____
###Markdown
The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own price elasticities are large and negative while cross-price elasticities are positive but much smaller.Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
###Code
means = results.extract_diagonal_means(elasticities)
###Output
_____no_output_____
###Markdown
An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
###Code
aggregates = results.compute_aggregate_elasticities(factor=0.1)
###Output
_____no_output_____
###Markdown
Since demand for an entire product category is generally less elastic than the average elasticity of individual products, mean own elasticities are generally larger in magnitude than aggregate elasticities.
###Code
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
###Output
_____no_output_____
###Markdown
Marginal Costs and MarkupsTo compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
###Code
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
###Output
_____no_output_____
###Markdown
Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
###Code
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
###Output
_____no_output_____
###Markdown
MergersBefore computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
###Code
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
###Output
_____no_output_____
###Markdown
To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
###Code
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
###Output
_____no_output_____
###Markdown
We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
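To give a feel for what iterating over the $\zeta$-markup equation involves (only the shape of the computation, not the actual Morrow and Skerlos term), here is a schematic sketch in which `zeta` is a made-up stand-in; the real term depends on shares, their price derivatives, and the ownership structure:

```python
import numpy as np

def zeta(prices):
    # Made-up contraction standing in for the real markup term.
    return 0.5 / (1.0 + prices)

costs = np.array([1.0, 1.5, 2.0])   # made-up marginal costs
prices = costs.copy()               # start the iteration at marginal costs
for _ in range(1000):
    updated = costs + zeta(prices)  # p <- c + zeta(p)
    if np.max(np.abs(updated - prices)) < 1e-12:
        break
    prices = updated
print(prices)
```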
###Code
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
###Output
_____no_output_____
###Markdown
If the problem was configured with more than two columns of firm IDs, we could estimate post-merger prices for the other mergers with the `firms_index` argument, which is by default `1`.We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
###Code
changed_shares = results.compute_shares(changed_prices)
###Output
_____no_output_____
###Markdown
Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
###Code
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
###Output
_____no_output_____
###Markdown
Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
###Code
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
###Output
_____no_output_____
###Markdown
On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
###Code
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
###Output
_____no_output_____
###Markdown
Bootstrapping ResultsPost-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.2. Initialize and solve a :class:`Problem` for each bootstrap sample.3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and from the resulting empirical distribution, construct bootstrap confidence intervals.Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitively expensive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and if it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.A more reasonable alternative is a parametric bootstrap procedure:1. Construct a large number of draws from the estimated joint distribution of parameters.2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources, and is simple enough to not require human oversight of each bootstrap iteration. The primary complication to this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll only use a few for the sake of speed in this example.
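To make the parametric idea concrete outside of pyblp, here is a minimal generic sketch assuming we only have a point estimate, an estimated covariance matrix, and some statistic of interest (all numbers made up); it is not the :meth:`ProblemResults.bootstrap` implementation:

```python
import numpy as np

rng = np.random.default_rng(0)

theta_hat = np.array([0.33, 2.45])   # made-up point estimates
cov_hat = np.diag([0.01, 0.25])      # their made-up estimated covariance

def statistic(theta):
    # Any post-estimation output computed from the parameters.
    return theta[0] * theta[1]

# Draw parameters from their asymptotic normal distribution and build
# the empirical distribution of the statistic.
draws = rng.multivariate_normal(theta_hat, cov_hat, size=1000)
stats = np.array([statistic(d) for d in draws])

lower, upper = np.percentile(stats, [5, 95])   # 90% percentile interval
print(lower, upper)
```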
###Code
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[10, 90],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
    'Mean Own Elasticity': means.flatten(),  # diagonal means computed above, to match the bootstrapped bounds
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
###Output
_____no_output_____
###Markdown
Optimal InstrumentsGiven a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
###Code
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
###Output
_____no_output_____
###Markdown
We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
###Code
updated_problem = instrument_results.to_problem()
updated_problem
###Output
_____no_output_____
###Markdown
We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
###Code
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs'),
method='1s'
)
updated_results
###Output
_____no_output_____ |
MultivariateLinearRegression/Multivariate_Linear_Regression.ipynb | ###Markdown
Multivariate Linear Regression Multiple FeaturesLinear regression that uses multiple variables is known as "*multivariate linear regression*".
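As a small warm-up with made-up numbers (not part of the original notebook), here is a sketch of the multivariate hypothesis $h_\theta(x) = \theta^T x$ and of fitting $\theta$ with the normal equation $\theta = (X^T X)^{-1} X^T y$:

```python
import numpy as np

# Made-up design matrix: a bias column plus two features.
X = np.array([[1.0, 1.0, 3.0],
              [1.0, 2.0, 5.0],
              [1.0, 3.0, 7.0],
              [1.0, 4.0, 10.0]])
y = np.array([10.0, 17.0, 24.0, 32.0])

# Normal equation: theta = (X^T X)^{-1} X^T y
theta = np.linalg.solve(X.T @ X, X.T @ y)

# Hypothesis for a new example x: h(x) = theta^T x
x_new = np.array([1.0, 5.0, 12.0])
print(theta, theta @ x_new)
```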
###Code
import tensorflow as tf
import numpy as np
theta_values = tf.constant([2.0, 3.0, 4.0], shape=[3,1])
x_values = tf.constant([1.0,3.0,9.0,], shape=[3,1])
result = tf.linalg.matmul(theta_values,x_values,transpose_a=True)
result
###Output
_____no_output_____ |
notebooks/K2c02_yso_summary_statistics.ipynb | ###Markdown
K2c02 YSO Summary Statistics Calculate the summary statistics for all 1678 YSO K2 C2 lightcurvesMichael Gully-Santiago January-February 2016There are 1678 light curve files for sources that were proposed for by Kraus, Hillenbrand, Covey, or (?) to be YSOs or YSO candidates in K2 Cycle 2. See my other notebook for how I downloaded them. Once we have the files it's a matter of what to do with them. Here we read in each file and construct the basic summary statistics using the pandas `describe()` function. We then save that series and some metadata into a data frame for each lightcurve. **The result is a $1658 \times 12 $ dataset of summary statistics for each K2 C2 lightcurve.** We had to drop 20 lightcurves that had NaN's for some reason.Finally, we construct some plots of the new data just to explore it.
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%config InlineBackend.figure_format = 'retina'
from astropy.utils.console import ProgressBar
import warnings
###Output
_____no_output_____
###Markdown
Make a list of all the lightcurve filenames.There's a wget script in the `data` directory that will automatically fetch each file. ```python!ls ../data/hl* > file_list.csv```
###Code
file_list_raw = pd.read_csv('file_list.csv', names=['fname'])
file_list = file_list_raw.fname.values
###Output
_____no_output_____
###Markdown
Compute the descriptive statistics for all 1678 of the YSO lightcurves from K2 Cycle 02.
###Code
columns = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
agg_data = pd.DataFrame(columns=columns)
n_files = len(file_list)
###Output
_____no_output_____
###Markdown
This takes about 1-2 minutes:
###Code
file = file_list[1]
raw_lc = pd.read_csv(file, index_col=False)
raw_lc.columns
plt.plot(raw_lc['BJD - 2454833'])
with ProgressBar(n_files, ipython_widget=True) as bar:
for i in range(n_files):
bar.update()
file = file_list[i]
raw_lc = pd.read_csv(file, index_col=False)
agg_data.loc[i] = raw_lc[' Corrected Flux'].describe()
###Output
###Markdown
Voilà:
###Code
agg_data['fname'] = file_list_raw.fname
agg_data['EPIC_ID'] = agg_data['fname'].str[33:33+9]
agg_data.head(3)
###Output
_____no_output_____
###Markdown
Plot the standard deviation ($\sigma$) of the light curve versus its interquartile range ($Q_3-Q_1$).
###Code
agg_data['logstd'] = np.log10(agg_data['std'])
agg_data['logiqr'] = np.log10(agg_data['75%']-agg_data['25%'])
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
tp = sns.jointplot(x="logiqr", y="logstd", alpha=0.2, kind='scatter',data=agg_data, xlim=(-4, 2));
###Output
//anaconda/lib/python3.4/site-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
if self._edgecolors == str('face'):
###Markdown
The scatter of standard deviation above the interquartile range reflects sources with extra flicker noise beyond just secular sinusoidal variation.
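A quick synthetic check of that intuition (simulated numbers, not K2 photometry): sprinkling a few flare-like outliers onto a pure sinusoid inflates the standard deviation noticeably while leaving the interquartile range almost unchanged.

```python
import numpy as np

t = np.linspace(0, 80, 3000)
sinusoid = 1 + 0.02 * np.sin(2 * np.pi * t / 3.0)

flicker = sinusoid.copy()
flicker[::150] += 0.2   # a handful of flare-like outliers

for name, flux in [('pure sinusoid', sinusoid), ('with flicker', flicker)]:
    iqr = np.percentile(flux, 75) - np.percentile(flux, 25)
    print(name, 'std =', round(np.std(flux), 4), 'IQR =', round(iqr, 4))
```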
###Code
ad = agg_data.dropna()
agg_data.shape, ad.shape
###Output
_____no_output_____
###Markdown
I had to drop 20 lightcurves with `NaN` values. Dunno what the deal is there.
###Code
#sns.distplot(ad['logiqr'].values, label='IQR')
###Output
_____no_output_____
###Markdown
Search for sources in a specific range of variability.
###Code
gi = (ad.logiqr < - 1.1) & (ad.logiqr > -1.2) & (ad.logstd <-1.4)
tp = sns.jointplot(x="logiqr", y="logstd", data=ad, xlim=(-2.0, -1.0), ylim=(-2.0, -1.0), alpha=0.5)
tp.ax_joint.plot(ad.logiqr[gi], ad.logstd[gi], '.', alpha=1.0)
print("There are {} sources that meet our above selection criterion".format(gi.sum()))
ad.EPIC_ID[gi].head()
sns.set_context('paper', font_scale=1.3)
sns.set_style('ticks')
###Output
_____no_output_____
###Markdown
Spot-check the light curves by eye-- look for sinusoidal variations.
###Code
ii = 226
plt.figure(figsize=(10, 4))
file = ad.fname[ii]
raw_lc = pd.read_csv(file, index_col=False)
plt.plot(raw_lc['BJD - 2454833'], raw_lc[' Corrected Flux'])
plt.ylim(0.90, 1.1)
plt.title("EPIC {} K2 C02 Light Curve".format(ad.EPIC_ID[ii]))
plt.xlabel('BJD - 2454833')
plt.ylabel('Flux');
###Output
_____no_output_____
###Markdown
Good candidates: 226, 271, 313, 403, 474 Good, but messy candidates:421, 440 Save the dataframe.
###Code
new_col_order = ['EPIC_ID','count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max', 'logstd', 'logiqr','fname']
ad = ad[new_col_order]
ad.head(2)
!mkdir ../analysis
ad.to_csv('../analysis/K2C02_YSO_agg_data.csv', index=False)
###Output
_____no_output_____ |
Kopitiam Emptied.ipynb | ###Markdown
Seq2Seq with PyTorch====Sequence-to-Sequence (Seq2Seq) learning is a useful class of neural network model to map sequential input into an output sequence. It has been shown to work well on various tasks, from machine translation to interpreting Python without an interpreter. {{citations-needed}}This notebook is a hands-on session to write an encoder-decoder Seq2Seq network using PyTorch for [DataScience SG meetup](https://www.meetup.com/DataScience-SG-Singapore/events/246541733/). Here are the accompanying slides for this notebook: https://goo.gl/Lu6CxBIt would be great if you have at least worked through the ["Deep Learning in 60 minutes" PyTorch tutorial](http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html) before continuing the rest of the notebook.Acknowledgements----The dataset used in this exercise is hosted on https://www.kaggle.com/alvations/sg-kopiThe materials of this notebook and the accompanying slides are largely based on the - [PyTorch Seq2Seq tutorials by Sean Robertson](http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html) and - [Luong et al. tutorial on neural machine translation in ACL16](https://sites.google.com/site/acl16nmt/home). Kopi Problems====In this hands-on session, we want to **train a neural network to translate from Singlish Kopi orders to English?****"Singlish" -> English**```"Kopi" -> Coffee with condensed milk"Kopi O" -> Coffee without milk or sugar"Kopi dinosaur gau siew dai peng" -> ???```(Image Source: http://www.straitstimes.com/lifestyle/food/get-your-kopi-kick)
###Code
from IPython.display import Image  # needed to show the inline image below
Image(url="https://static.straitstimes.com.sg/sites/default/files/160522_kopi.jpg", width=700)
###Output
_____no_output_____
###Markdown
Seriously?----Yes, we'll be translating Singlish Kopi orders to English using the [sequence-to-sequence network](https://arxiv.org/abs/1409.3215) (Sutskever et al. 2014). But first...--- 1. Data Munging====Before any machine/deep learning, we have to get some data and "hammer" it until we get it into the shape we want.> *Data scientists spend 60% of their time on cleaning and organizing data. Collecting data sets comes second at 19% of their time, meaning data scientists spend around 80% of their time on preparing and managing data for analysis.*> (Source: [Gil Press](https://www.forbes.com/sites/gilpress/2016/03/23/data-preparation-most-time-consuming-least-enjoyable-data-science-task-survey-says/3e4dc0416f63) Forbes article)**Step 1:** Take the data from somewhere, in this case: http://kaggle.com/alvations/sg-kopi.**Step 2:** Import your favorite dataframe and text processing library.**Step 3:** Munge the data till desired.
###Code
import pandas as pd
from gensim.corpora.dictionary import Dictionary
from nltk import word_tokenize
# Imports needed by the PyTorch and pickling cells further down.
import pickle, random, torch
import torch.nn as nn, torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
use_cuda = torch.cuda.is_available()  # fall back to CPU if there is no GPU
# Reads the tab-delimited data using Pandas.
kopitiam = pd.read_csv('kopitiam.tsv', sep='\t')
kopitiam.head()
###Output
_____no_output_____
###Markdown
1.1. Reshaping the Data and Adding START and END Symbols----To get the data in shape, we want to: 1. normalize and tokenize the input 2. pad the input with START (`<s>`) and END (`</s>`) symbols.If we look at the data carefully, sometimes we see that we have a mix of capitalized and lower-cased spellings, esp. in the "Local Terms" column. E.g. "Kopi O" and "Kopi o". For simplicity, we'll lowercase all the inputs and outputs so that our models don't think that the big "O" and the small "o" are different things.Additionally, we want to tokenize our input so that we pad punctuation with spaces away from the preceding or following word. There are many tokenization functions; we'll use the `word_tokenize()` function in `nltk`.As for padding the sentence with START and END symbols, it's an indication that we give to our Recurrent Neural Network (RNN) to denote the start/end of our in/output sequences. (**Cut-away:** Here are some experts pitching in on why we need the START/END symbol. https://twitter.com/alvations/status/955770616648364037) TL;DR----Given, $[in]$:```kopi oblack coffee with sugar```We want, $[out]$:```['<s>', 'kopi', 'o', '</s>']['<s>', 'black', 'coffee', 'with', 'sugar', '</s>']```
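For instance, a throwaway illustration (not the cell you fill in below) of lower-casing, tokenizing, and padding a single made-up order:

```python
from nltk import word_tokenize

order = "Kopi O kosong, peng!"
tokens = ['<s>'] + word_tokenize(order.lower()) + ['</s>']
print(tokens)   # e.g. ['<s>', 'kopi', 'o', 'kosong', ',', 'peng', '!', '</s>']
```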
###Code
# Use a unique string to indicate START and END of a sentence.
# Assign a unique index to them.
START, START_IDX = '<s>', 0
END, END_IDX = '</s>', 1
# We use this idiom to tokenize our sentences in the dataframe column:
# >>> DataFrame['column'].apply(str.lower).apply(word_tokenize)
# Also we added the START and the END symbol to the sentences.
singlish_sents = [START] + ??? + [END]
english_sents = [START] + ??? + [END]
# We're sort of getting into the data into the shape we want.
# But now it's still too humanly readable and redundant.
## Cut-away: Computers like it to be simpler, more concise. -_-|||
print('First Singlish sentence:', singlish_sents[0])
print('First English sentence:', english_sents[0])
###Output
First Singlish sentence: ['<s>', 'kopi', 'o', '</s>']
First English sentence: ['<s>', 'black', 'coffee', 'with', 'sugar', '</s>']
###Markdown
1.1.2 Vectorize the Data----There are many ways to vectorize text data. And since we are going to use an RNN, which requires the order of the sequences to be kept, we can simply convert our vocabulary (unique words) in the data into an indexed dictionary and represent each sentence as a list of indices. Thankfully, we don't have to write messy classes to create objects that store these dictionaries of indices to the respective words. We have the awesome `gensim` library and the [gensim.corpora.Dictionary](https://radimrehurek.com/gensim/corpora/dictionary.html) class.> **Note:** >> We want `<s>` and `</s>` to take the 0th and 1st indices so we first initialize a sentence with only the `<s>` symbol and another sentence with `</s>` to prevent the native Python dictionary hashing that messes up the order of a set. > > So we'll do `Dictionary([['<s>'], ['</s>'], ['UNK']])` before we use `Dictionary.add_documents()`.>> To convert the input sequence of tokens into a list of indices we use the [`Dictionary.doc2idx()`](https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2idx) function. TL;DR----Given this $[in]$:```['<s>', 'kopi', 'o', '</s>']['<s>', 'black', 'coffee', 'with', 'sugar', '</s>']```We want $[out]$:```[0, 3, 4, 1][0, 3, 4, 6, 5, 1]```
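If the gensim API is new to you, here is a tiny standalone illustration on toy sentences (separate from the kopi data) of how `Dictionary`, `add_documents()` and `doc2idx()` behave:

```python
from gensim.corpora.dictionary import Dictionary

toy_vocab = Dictionary([['<s>'], ['</s>'], ['UNK']])
toy_vocab.add_documents([['<s>', 'teh', 'peng', '</s>'],
                         ['<s>', 'milo', 'dinosaur', '</s>']])

print(sorted(toy_vocab.items()))                           # (index, word) pairs
print(toy_vocab.doc2idx(['<s>', 'milo', 'peng', '</s>']))  # tokens -> indices
```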
###Code
# Let's convert the individual words into some sort of unique index
# and use the unique to represent the words.
## Cut-away: Integers = 1-2 bytes vs UTF-8 Strings = no. of chars * 1-2 bytes. @_@
english_vocab = Dictionary([['<s>'], ['</s>'], ['UNK']])
english_vocab.add_documents(english_sents)
singlish_vocab = Dictionary([['<s>'], ['</s>'], ['UNK']])
singlish_vocab.???
# First ten words in the vocabulary.
print('First 10 Singlish words in Dictionary:\n', sorted(singlish_vocab.items())[:10])
print()
print('First 10 English words in Dictionary:\n', sorted(english_vocab.items())[:10])
# Lets save our dictionaries.
with open('singlish_vocab.Dictionary.pkl', 'wb') as fout:
pickle.dump(singlish_vocab, fout)
with open('english_vocab.Dictionary.pkl', 'wb') as fout:
pickle.dump(???)
# Now, convert all the sentences into list of the indices
print('First Singlish sentence:')
print(singlish_sents[0])
print(singlish_vocab.doc2idx(singlish_sents[0]), end='\n\n')
print('First English sentence:')
print(english_sents[0])
print(english_vocab.doc2idx(english_sents[0]))
# Lets create a function to convert new sentences into the indexed forms.
def vectorize_sent(sent, vocab):
return vocab.???([START] + ??? + [END])
new_kopi = "Kopi dinosaur gau siew dai peng"
vectorize_sent(new_kopi, singlish_vocab)
###Output
_____no_output_____
###Markdown
1.1.3. Clobbering the Data into PyTorch Variable----For the last step of data hammering, we need to clobber the vectorized sentence into PyTorch `Variable` type. **Note:** Before continuing this notebook, you're strongly encourage to go through the following if you're unfamiliar with PyTorch:http://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_autograd.html
###Code
def variable_from_sent(sent, vocab):
vsent = vectorize_sent(sent, vocab)
result = Variable(torch.LongTensor(vsent).view(-1, 1))
return result.cuda() if use_cuda else result
new_kopi = "Kopi dinosaur gau siew dai peng"
variable_from_sent(new_kopi, singlish_vocab)
# To get the sentence length.
variable_from_sent(new_kopi, singlish_vocab).size()[0] # Includes START and END symbol.
# Prepare the whole training corpus.
singlish_tensors = kopitiam['Local Terms'].apply(lambda s: variable_from_sent(s, singlish_vocab))
english_tensors = kopitiam['Meaning'].apply(lambda s: variable_from_sent(s, english_vocab))
# Now, each item in `sent_pairs` is our data point.
sent_pairs = list(zip(singlish_tensors, english_tensors))
###Output
_____no_output_____
###Markdown
2. The Seq2Seq Model====A Recurrent Neural Network (RNN) is a network that operates on a sequence and uses its own output as input for subsequent steps.> *The general idea is to make **two recurrent neural networks transform one sequence into another**. An encoder network condenses an input sequence into a vector and a decoder network unfolds the vector into a new sequence.* 2.1. The Encoder====The encoder of a seq2seq network is an RNN that outputs some value for every word from the input sentence. For every input word the encoder outputs a vector and a hidden state, and uses the hidden state for the next input word.
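Before filling in the blanks below, it may help to poke at `nn.Embedding` and `nn.GRU` in isolation (arbitrary sizes, throwaway tensors); the `.view(1, 1, -1)` mimics the (seq_len, batch, features) shape the GRU expects:

```python
import torch
import torch.nn as nn
from torch.autograd import Variable

embedding = nn.Embedding(num_embeddings=68, embedding_dim=10)   # vocab size x hidden size
gru = nn.GRU(10, 10)                                            # input size, hidden size

word_index = Variable(torch.LongTensor([5]))                    # a single word id
hidden = Variable(torch.zeros(1, 1, 10))                        # initial hidden state

embedded = embedding(word_index).view(1, 1, -1)                 # shape: (1, 1, 10)
output, hidden = gru(embedded, hidden)
print(output.size(), hidden.size())                             # both (1, 1, 10)
```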
###Code
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
# Set the no. of nodes for the hidden layer.
self.hidden_size = hidden_size
# Initialize the embedding layer with the
# - size of input (i.e. no. of words in input vocab)
# - no. of hidden nodes in the embedding layer
self.embedding = ???
# Initialize the GRU with the
# - size of the hidden layer from the previous state
# - size of the hidden layer from the current state
self.gru = ???
def forward(self, input, hidden):
# Feed the input into the embedding layer.
embedded = self.embedding(input).view(1, 1, -1)
# Feed the embedded layer with the hidden layer to the GRU.
# Update the output and hidden layer.
output, hidden = self.gru(???)
return output, hidden
def initialize_hidden_states(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
return result.cuda() if use_cuda else result
###Output
_____no_output_____
###Markdown
2.2. Simple Decoder====In the simplest seq2seq decoder we use only the last output of the encoder. This last output is sometimes called the context vector as it encodes context from the entire sequence. This context vector is used as the initial hidden state of the decoder.At every step of decoding, the decoder is given an input token and a hidden state. The initial input token is the start-of-string `<s>` token, and the first hidden state is the context vector (the encoder's last hidden state).
###Code
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size):
super(DecoderRNN, self).__init__()
# Set the no. of nodes for the hidden layer.
self.hidden_size = hidden_size
# Initialize the embedding layer with the
# - size of output (i.e. no. of words in output vocab)
# - no. of hidden nodes in the embedding layer
self.embedding = nn.Embedding(output_size, hidden_size)
# Initialize the GRU with the
# - size of the hidden layer from the previous state
# - size of the hidden layer from the current state
self.gru = nn.GRU(hidden_size, hidden_size)
# Set the output layer to output a specific symbol
# from the output vocabulary
self.softmax = nn.LogSoftmax(dim=1)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, input, hidden):
# Feed the input into the embedding layer.
output = self.embedding(input).view(1, 1, -1)
# Transform the embedded output with a relu function.
output = F.relu(output)
# Feed the embedded layer with the hidden layer to the GRU.
# Update the output and hidden layer.
output, hidden = self.gru(???)
# Take the updated output and find the most appropriate
# output symbol. Hint: Softmax
output = ???
return output, hidden
def initialize_hidden_states(self):
result = ???
return result.cuda() if use_cuda else result
###Output
_____no_output_____
###Markdown
2.3. Training the Model====To train we run the input sentence through the encoder, and keep track of every output and the latest hidden state. Then the decoder is given the `<s>` token as its first input, and the last hidden state of the encoder as its first hidden state. 2.3.1 Set the Hyperparameters and Prepare Data (again...)----As with all gradient methods in deep/machine learning, the basic idea is to: 1. Iterate through **batch_size** data points **epochs** no. of times 2. For each batch of data, calculate the **loss** between the (i) predicted output (y_hat) given the inputs (x) and (ii) the actual output (y). For deep learning models, **backpropagate** the loss 3. Make the **optimizer** take a step based on the **learning_rate** and the **backpropagated** losses 4. Repeat Steps 1-3 until certain stopping criteria are met (e.g. the loss is no longer reducing or is taking an upwards trend, or until a fixed no. of epochs is completed) **Note:** If you're unfamiliar with the steps above, I strongly encourage you to: - Watch @sirajraval on https://www.youtube.com/embed/q555kfIFUCM and - Spend some time going through this blogpost by @iamtrask http://iamtrask.github.io/2015/07/12/basic-python-network/
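Those four steps look the same for almost any PyTorch model; here is a minimal sketch on a throwaway linear model with made-up data, just to show where `zero_grad()`, the loss, `backward()` and `step()` sit:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

model = nn.Linear(3, 1)                       # a stand-in model
optimizer = optim.SGD(model.parameters(), lr=0.01)
criterion = nn.MSELoss()

x = Variable(torch.randn(8, 3))               # made-up batch of inputs
y = Variable(torch.randn(8, 1))               # made-up targets

for epoch in range(30):
    optimizer.zero_grad()                     # clear old gradients
    y_hat = model(x)                          # forward pass...
    loss = criterion(y_hat, y)                # ...and compute the loss
    loss.backward()                           # backpropagate
    optimizer.step()                          # optimizer takes a step
```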
###Code
hidden_size = 10
learning_rate=0.01
batch_size = 2
epochs = 30 # Since we are taking batch_size=2 and epochs=30, we only look at 60 data points.
criterion = nn.NLLLoss()
MAX_LENGTH=20
# Initialize the network for encoder and decoder.
input_vocab, output_vocab = singlish_vocab, english_vocab
encoder = EncoderRNN(???)
decoder = DecoderRNN(???)
if use_cuda:
encoder = encoder.cuda()
decoder = decoder.cuda()
# Initialize the optimizer for encoder and decoder.
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = ???
# If batchsize == 1, choose 1 data points per batch:
##training_data = [[random.choice(sent_pairs)] for i in range(epochs)]
# If batch_size > 1, use random.sample() instead of random.choice:
training_data = [random.sample(sent_pairs, batch_size) for i in range(epochs)]
###Output
_____no_output_____
###Markdown
2.3.2. Loop through the batches---To start the model training, first we iterate through the batches.
###Code
#############################################
# 2.3.2. Loop through the batches.
#############################################
# Start the training.
for data_batch in training_data:
# (Re-)Initialize the optimizers, clear all gradients after every batch.
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Reset the loss for every batch.
loss = 0
for input_variable, target_variable in data_batch:
# Initialize the hidden_states for the encoder.
encoder_hidden = ???
# Initialize the length of the PyTorch variables.
input_length = input_variable.size()[0]
target_length = ???
encoder_outputs = Variable(torch.zeros(MAX_LENGTH, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
###Output
_____no_output_____
###Markdown
2.3.3. Iterating through each word in the encoder.----Moving on, for each batch, we iterate through the data points (i.e. sentence pairs).
###Code
#############################################
# 2.3.2. Loop through the batches.
#############################################
# Start the training.
for data_batch in training_data:
# (Re-)Initialize the optimizers, clear all gradients after every batch.
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Reset the loss for every batch.
loss = 0
for input_variable, target_variable in data_batch:
# Initialize the hidden_states for the encoder.
encoder_hidden = encoder.initialize_hidden_states()
# Initialize the length of the PyTorch variables.
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(MAX_LENGTH, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
#############################################
# 2.3.3. Iterating through each word in the encoder.
#############################################
# Iterating through each word in the input.
for ei in range(input_length):
# We move forward through each state.
encoder_output, encoder_hidden = encoder(???)
# And we save the encoder outputs.
# Note: We're retrieving [0][0] cos remember the weird .view(1,1,-1) for the GRU.
encoder_outputs[ei] = encoder_output[0][0]
###Output
_____no_output_____
###Markdown
2.3.3.1. Outputs of the Encoder----Before we move on with the training data, we should take a look at the coolest feature of PyTorch (aka Tensorflow Eager mode way before eager mode is a thing). The fact that we can hijack the training process and start printing out layer output values or current parameters without needing to wait till the end of the training is pretty powerful. This is an artifact of how the PyTorch library designer allows users to probe and change the network at any point of time without first declaring and fixing a specific network. **Cut-away:** Here's some heated blogpost of imperative/declarative programming style.- Funny yet Informative: https://tylermcginnis.com/imperative-vs-declarative-programming/- Simple Python is de facto imperative but it's possible to do otherwise: http://www.benfrederickson.com/python-as-a-declarative-programming-language/<!--Pardon me being frank (not hotdog)----IMHO, I (Liling) don't really care how I write my network as long as the library allows me to have flexibility to alter networks and training mechanisms to suit what I'm trying to do. And for now, I write less code to do the same thing in PyTorch, so yeah... -->----Lets take a look at the last sentence we processed with the code above (Section 2.3.3.)----
###Code
# The encoded output for the last sentence in our training_data
# The encoder has 68 unique words
print(encoder, '\n')
print(singlish_vocab)
print('\n########\n')
# The last input sentence, in PyTorch Tensor data structure.
print(data_batch[-1][0])
print('########\n')
# The last input sentence as list(int)
print(list(map(int, data_batch[-1][0])), '\n')
print('########\n')
# The last input sentence as list(int)
print(' '.join([singlish_vocab[i] for i in map(int, data_batch[-1][0])]))
print('\n########\n')
# The encoded outputs of the last sentence
# Note: We have a matrix of 20 (MAX_LENGTH) x 10 (hidden_size) and
# for this particular sentence, we only have 4 encoded outputs
print(encoder_outputs)
###Output
Variable containing:
-0.1214 0.2326 -0.3078 -0.0045 -0.0870 0.0150 0.4273 0.3584 0.1007 0.2582
-0.0815 0.2332 -0.4338 -0.3060 0.0044 0.1103 0.5286 -0.0093 0.4035 0.1736
-0.6873 0.0396 0.3701 -0.7467 -0.1440 -0.4195 0.5479 0.2969 0.4136 -0.0482
-0.4481 0.3308 -0.2921 -0.4302 -0.5618 -0.2736 0.3295 0.1484 0.5261 0.0894
0.1654 0.2799 0.1352 -0.2455 0.0506 -0.1016 -0.3219 -0.3612 0.3636 -0.1309
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
[torch.FloatTensor of size 20x10]
###Markdown
**Note:** We see only 5 rows are populated, with each row representing the encoded output of an individual state as we step through the RNN. The final populated row of `encoder_outputs` corresponds to the `encoder_hidden`, i.e.
###Code
# The last hidden state of the last input sentence.
# Note: For vanilla RNN (Elman Net), the last hidden state of the encoder
# is the start state of the decoder's hidden state.
print(encoder_hidden)
###Output
Variable containing:
(0 ,.,.) =
Columns 0 to 8
-0.0078 -0.5923 0.6164 0.1475 -0.5511 0.6830 0.5584 0.0438 0.3793
Columns 9 to 9
-0.0317
[torch.FloatTensor of size 1x1x10]
###Markdown
2.3.4. Iterating through each word in the decoder.----After encoding, we 1. initialize the start of the decoder input with the index of our START symbol. 2. use the final encoded hidden state as the start of the decoder hidden state, i.e. `decoder_hidden = encoder_hidden`. 3. step through the state, i.e. `decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)` 4. we map the softmax output (negative log probabilities to the words) and choose the best prediction as the predicted word for the current state, i.e. `topv, topi = decoder_output.data.topk(1); ni = topi[0][0]` 5. as we move through the decoder states (i.e. as we predict the previous words), we use the newly predicted word as the input to the next state, i.e. `decoder_input = Variable(torch.LongTensor([ni]))`
###Code
#############################################
# 2.3.2. Loop through the batches.
#############################################
# Start the training.
for data_batch in training_data:
# (Re-)Initialize the optimizers, clear all gradients after every batch.
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Reset the loss for every batch.
loss = 0
for input_variable, target_variable in data_batch:
# Initialize the hidden_states for the encoder.
encoder_hidden = encoder.initialize_hidden_states()
# Initialize the length of the PyTorch variables.
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(MAX_LENGTH, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
#############################################
# 2.3.3. Iterating through each word in the encoder.
#############################################
# Iterating through each word in the input.
for ei in range(input_length):
# We move forward through each state.
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
# And we save the encoder outputs.
# Note: We're retrieving [0][0] cos remember the weird .view(1,1,-1) -_-|||
encoder_outputs[ei] = encoder_output[0][0]
#############################################
# 2.3.4. Iterating through each word in the decoder.
#############################################
# Initialize the variable input with the index of the START.
decoder_input = Variable(torch.LongTensor([[START_IDX]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# As the first state of the decoder, we take the last step of the encoder.
decoder_hidden = ???
# Iterate through each state in the decoder.
# Note: when we are training we know the length of the decoder.
# so we can use the trick to restrict the loop when decoding.
for di in range(target_length):
# We move forward through each state.
decoder_output, decoder_hidden = decoder(???)
            # What is all this weird syntax? Refer to 2.3.4.1
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
# Replace our decoder input for the next state with the
# embedding of the decoded topi guess.
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# Update our loss for this batch.
loss += criterion(decoder_output, target_variable[di])
# If we see the </s> symbol, break the training.
if ni == END_IDX:
break
###Output
_____no_output_____
###Markdown
2.3.4.1 Outputs of the Decoder----Once again, we hijack the training process and take a look at what we're doing at the decoder. Here we see the last sentence in our training_data in the previous jupyter notebook cell:
###Code
# Cut-away: The decoded output for the last sentence in our training_data
# The decoder's output vocabulary has 117 unique words
print(decoder, '\n')
print(english_vocab)
print('\n########\n')
# The last input sentence.
print(' '.join([singlish_vocab[i] for i in map(int, data_batch[-1][0])]))
# The last target sentence.
print(' '.join([english_vocab[i] for i in map(int, data_batch[-1][1])]))
print('\n########\n')
###Output
DecoderRNN(
(embedding): Embedding(116, 10)
(gru): GRU(10, 10)
(softmax): LogSoftmax()
(out): Linear(in_features=10, out_features=116)
)
Dictionary(116 unique tokens: ['<s>', '</s>', 'black', 'coffee', 'sugar']...)
########
<s> tai ga ho </s>
<s> horlicks </s>
########
###Markdown
How did we arrive at the predicted word from decoder_output?-----We look at `decoder_output.data`, which is a vector of 117 columns, where each column corresponds to a target word that we could predict. The values are negative log probabilities.Then `decoder_output.data.topk()` filters and keeps the top-k predictions based on the vector of negative log probabilities. It returns a pair where the first item is the score (i.e. negative log probability) and the second item is the index of the word in the target vocabulary.
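`topk` itself is just a generic tensor operation; a quick standalone look at it with toy numbers:

```python
import torch

log_probs = torch.FloatTensor([[-4.1, -0.3, -2.7, -1.2]])   # fake scores for 4 words
topv, topi = log_probs.topk(1)
print(topv)         # best score: -0.3
print(topi)         # its column index: 1
print(topi[0][0])   # the plain index used to look the word up in the vocab
```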
###Code
# The last word in the last sentence.
print([english_vocab[i] for i in map(int, data_batch[-1][1])][-1])
print('\n########')
# The -log probability of the word that's most probably the
# correct target word as we moved from the encoder to the decoder.
print(decoder_output.data)
print('\n########')
# The word with the highest probability
print(decoder_output.data.topk(1))
print('\n########')
# Take a look at what's the decoder's guess for the final word in the last sentence.
topv, topi = decoder_output.data.topk(1)
print(topv) # The -log probability of the decoder's guess.
print(topi) # The index of the word in the english_vocab.
print(english_vocab[int(topi)]) # Decoder's guess of the final word.
###Output
</s>
########
Columns 0 to 9
-4.9682 -5.1267 -5.6028 -4.0663 -4.5102 -4.4591 -4.9175 -5.0732 -5.0293 -5.3705
Columns 10 to 19
-5.0894 -4.9453 -4.6394 -4.9664 -5.1267 -4.4543 -5.0366 -4.8566 -4.6203 -4.9958
Columns 20 to 29
-4.7038 -4.5959 -4.7804 -4.8709 -5.0501 -4.8864 -4.8876 -5.0000 -4.6976 -4.6324
Columns 30 to 39
-4.4754 -5.4241 -4.5974 -4.6645 -4.7227 -4.9907 -5.0174 -4.8825 -4.9364 -4.7163
Columns 40 to 49
-5.0280 -5.0492 -4.6629 -5.1601 -4.3666 -4.3587 -4.8414 -4.7386 -4.4162 -4.6509
Columns 50 to 59
-4.7710 -4.9370 -4.4109 -4.6621 -4.6100 -4.8891 -4.8433 -4.4666 -4.8315 -4.4337
Columns 60 to 69
-4.6725 -4.8010 -4.5824 -4.7252 -4.5903 -4.8131 -4.2811 -5.3550 -5.5460 -4.8272
Columns 70 to 79
-5.2190 -4.6178 -4.9321 -5.2433 -4.6670 -4.0502 -4.4425 -4.5862 -4.6299 -4.5233
Columns 80 to 89
-5.2702 -4.7900 -5.3953 -4.6982 -4.4740 -4.9680 -4.4162 -5.1073 -4.9964 -4.5141
Columns 90 to 99
-5.1502 -4.7556 -4.3382 -4.8319 -4.6833 -4.7920 -4.5417 -4.5824 -4.7229 -4.4789
Columns 100 to 109
-4.7015 -4.9493 -4.7649 -5.0959 -4.5520 -5.3925 -4.7920 -5.0965 -4.8581 -5.4309
Columns 110 to 116
-4.1824 -4.6310 -4.8550 -5.0144 -5.2293 -4.9815 -4.7103
[torch.FloatTensor of size 1x117]
########
(
-4.0502
[torch.FloatTensor of size 1x1]
,
75
[torch.LongTensor of size 1x1]
)
########
-4.0502
[torch.FloatTensor of size 1x1]
75
[torch.LongTensor of size 1x1]
the
###Markdown
2.3.5 Backpropagate the Loss and the Optimizers Take a Step.----The "magic" of deep learning libraries like PyTorch, Tensorflow, DyNet, etc. is that we don't have to write our own derivative and recursive backpropagation functions. In PyTorch, we simply do something like:```python>>> criterion = nn.NLLLoss()>>> ... ... (yada yada network)>>> loss += criterion(decoder_output, target_variable[di])>>> loss.backward() >>> optimizer.step()``` Viva la backpropaganda!
###Code
#############################################
# 2.3.2. Loop through the batches.
#############################################
# Start the training.
for data_batch in training_data:
# (Re-)Initialize the optimizers, clear all gradients after every batch.
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Reset the loss for every batch.
loss = 0
for input_variable, target_variable in data_batch:
# Initialize the hidden_states for the encoder.
encoder_hidden = encoder.initialize_hidden_states()
# Initialize the length of the PyTorch variables.
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(MAX_LENGTH, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
#############################################
# 2.3.3. Iterating through each word in the encoder.
#############################################
# Iterating through each word in the input.
for ei in range(input_length):
# We move forward through each state.
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
# And we save the encoder outputs.
# Note: We're retrieving [0][0] cos remember the weird .view(1,1,-1) -_-|||
encoder_outputs[ei] = encoder_output[0][0]
#############################################
# 2.3.4. Iterating through each word in the decoder.
#############################################
# Initialize the variable input with the index of the START.
decoder_input = Variable(torch.LongTensor([[START_IDX]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# As the first state of the decoder, we take the last step of the encoder.
decoder_hidden = encoder_hidden
# Iterate through each state in the decoder.
# Note: when we are training we know the length of the decoder.
# so we can use the trick to restrict the loop when decoding.
for di in range(target_length):
# We move forward through each state.
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)
            # What is all this weird syntax? Refer to 2.3.4.1
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
# Replace our decoder input for the next state with the
# embedding of the decoded topi guess.
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# Update our loss for this batch.
loss += criterion(decoder_output, target_variable[di])
# If we see the </s> symbol, break the training.
if ni == END_IDX:
break
#####################################################
# 2.3.5 Backpropagate the Loss and Optimizers Takes a Step.
#####################################################
??? # Backpropagate.
encoder_optimizer.step()
decoder_optimizer.???
###Output
_____no_output_____
###Markdown
Let's try translating with the small model====It's good to note that the model has only seen 60 random data pairs of singlish and english sentences using these hyperparameters:```hidden_size = 10learning_rate=0.01batch_size = 2epochs = 30 Since we are taking batch_size=2 and epochs=30, we only look at 60 data points.criterion = nn.NLLLoss()MAX_LENGTH=20``` 2.3.6 Getting the Model to Translate====Remember that during training, our decode takes the `encoder_hidden` as in start state of the `decoder_hidden` and starts predicting the words as we move along the decoder states?Similarly, when translating input sentences with no target sentences, we'll do the same prediction in the decoder but the only difference is that we **DON'T** need to:- measure the difference between the prediction and the actual target sentence since we don't have it, (i.e. we don't need to do `criterion(decoder_output, target_variable[di])`) and- backpropagate nor update the loss- do anything to the optimizerYou can see that the `translator()` function is very much like our `train_one_epoch` code, we added: - the need to keep a list of the decoded words' indices - instead of returning the loss, we return the list of decoded word indices
###Code
def translator(encoder, decoder, input_variable, max_length=MAX_LENGTH):
# The length of the input.
input_length = input_variable.size()[0]
# For each sentence, initilize the hidden states with zeros.
encoder_hidden = ???
# Initialize the encoder outputs.
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
# Iterate through the input words.
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(???)
# Initialize the decoder with the start symbol <s>.
decoder_input = Variable(torch.LongTensor([[START_IDX]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# Use the last encoder hidden state as the first decoder's hidden state.
decoder_hidden = encoder_hidden
# Keep a list of the decoded words.
decoded_words = []
# Iterate through the decoder states.
for di in range(max_length):
# Very similar to how the training works.
decoder_output, decoder_hidden = decoder(???)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == END_IDX:
decoded_words.append(END_IDX)
break
else:
decoded_words.append(ni)
# Replace the new decoder input for the next state
# with the top guess of this state.
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
return decoded_words
sent = 'kopi siew dai'
variable_from_sent(sent, singlish_vocab)
output_words = translator(my_encoder, my_decoder,
variable_from_sent(sent, singlish_vocab))
output_words
[english_vocab[i] for i in output_words[1:output_words.index(1)]]
def translate(kopi_order):
output_words = translator(my_encoder, my_decoder, variable_from_sent(kopi_order, singlish_vocab))
print(output_words)
output_sentence = [english_vocab[i] for i in output_words[1:output_words.index(1)]]
return ' '.join(output_sentence)
translate('kopi siew dai')
translate('kopi')
translate('kopi o')
translate('teh o')
###Output
[0, 29, 10, 4, 12, 28, 4, 1]
###Markdown
Of course, 60 data points is insufficient for the model to be trained properly!!!----Let's clean up the training code and train it longer so that it sees more sentence pairs! 2.4.1 Formalize the training per epoch as a function====Now we know how to write the network architectures as objects in PyTorch and the training process as standard Pythonic loops across epochs and across the data points in each epoch. Let's put everything together and write functions to simplify how we train a model.**Acknowledgement:** Largely, the following code is from http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
###Code
#########################################################
# Some Logging and Plotting Candies to Monitor Training
#########################################################
import time
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
#########################################################
# Training per epoch,
# Iterates across data points per epoch.
#########################################################
def train_one_epoch(input_variable, target_variable, encoder, decoder,
encoder_optimizer, decoder_optimizer, criterion):
"""
Function to put the variables, decoder and optimizers to train per epoch.
"""
encoder_hidden = encoder.initialize_hidden_states()
# (Re-)Initialize the optimizers, clear all gradients.
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Initialize the length of the PyTorch variables.
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(MAX_LENGTH, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
loss = 0
# Iterating through each word in the input.
for ei in range(input_length):
# We move forward through each state.
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
# And we save the encoder outputs.
encoder_outputs[ei] = encoder_output[0][0]
# Initialize the variable input with the index of the START.
decoder_input = Variable(torch.LongTensor([[START_IDX]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
# As the first state of the decoder, we take the last step of the encoder.
decoder_hidden = encoder_hidden
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden = decoder(
decoder_input, decoder_hidden)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
loss += criterion(decoder_output, target_variable[di])
if ni == END_IDX:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
#########################################################
# Top-level function to start the training,
# iterates across epochs.
#########################################################
def train(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [random.choice(sent_pairs) for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_variable = training_pair[0]
target_variable = training_pair[1]
loss = train_one_epoch(input_variable, target_variable, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
###Output
_____no_output_____
###Markdown
Let's Re-Train====
###Code
MAX_LENGTH = 20
batches = 100000 # In this case, the PyTorch train_one_epoch() and train() code is using batch_size=1
hidden_size = 100
my_encoder = EncoderRNN(len(singlish_vocab), hidden_size)
my_decoder = DecoderRNN(hidden_size, len(english_vocab))
if use_cuda:
my_encoder = my_encoder.cuda()
my_decoder = my_decoder.cuda()
train(my_encoder, my_decoder, batches, print_every=100)
###Output
0m 1s (- 19m 27s) (100 0%) 2.6212
0m 2s (- 18m 45s) (200 0%) 2.0315
0m 3s (- 17m 31s) (300 0%) 1.9701
0m 4s (- 17m 4s) (400 0%) 1.9609
0m 5s (- 16m 40s) (500 0%) 2.0464
0m 5s (- 16m 29s) (600 0%) 2.1187
0m 6s (- 16m 18s) (700 0%) 1.9449
0m 7s (- 16m 11s) (800 0%) 2.0178
0m 8s (- 16m 7s) (900 0%) 2.0096
0m 9s (- 16m 6s) (1000 1%) 1.9958
0m 10s (- 16m 9s) (1100 1%) 1.9795
0m 11s (- 16m 8s) (1200 1%) 1.8545
0m 12s (- 16m 8s) (1300 1%) 1.7732
0m 13s (- 16m 8s) (1400 1%) 1.5005
0m 14s (- 16m 9s) (1500 1%) 1.6381
0m 15s (- 16m 5s) (1600 1%) 1.6208
0m 16s (- 16m 3s) (1700 1%) 1.5456
0m 17s (- 16m 2s) (1800 1%) 1.4217
0m 18s (- 16m 4s) (1900 1%) 1.5309
0m 19s (- 16m 3s) (2000 2%) 1.4450
0m 20s (- 15m 59s) (2100 2%) 1.4215
0m 21s (- 16m 0s) (2200 2%) 1.4209
0m 22s (- 16m 0s) (2300 2%) 1.2946
0m 23s (- 16m 1s) (2400 2%) 1.4898
0m 24s (- 16m 2s) (2500 2%) 1.2311
0m 25s (- 16m 1s) (2600 2%) 1.2861
0m 26s (- 16m 2s) (2700 2%) 1.1607
0m 27s (- 16m 1s) (2800 2%) 1.3717
0m 28s (- 16m 2s) (2900 2%) 1.3822
0m 29s (- 16m 4s) (3000 3%) 1.3662
0m 30s (- 16m 4s) (3100 3%) 1.1817
0m 31s (- 16m 5s) (3200 3%) 1.3635
0m 32s (- 16m 5s) (3300 3%) 1.2366
0m 34s (- 16m 9s) (3400 3%) 1.2331
0m 35s (- 16m 8s) (3500 3%) 1.1480
0m 36s (- 16m 14s) (3600 3%) 1.1370
0m 38s (- 16m 42s) (3700 3%) 1.1261
0m 39s (- 16m 51s) (3800 3%) 1.1117
0m 41s (- 16m 55s) (3900 3%) 1.1260
0m 42s (- 16m 58s) (4000 4%) 1.1846
0m 43s (- 16m 59s) (4100 4%) 0.9976
0m 44s (- 17m 0s) (4200 4%) 1.0898
0m 46s (- 17m 7s) (4300 4%) 1.0471
0m 47s (- 17m 13s) (4400 4%) 0.9087
0m 48s (- 17m 19s) (4500 4%) 0.9965
0m 50s (- 17m 24s) (4600 4%) 0.9225
0m 51s (- 17m 34s) (4700 4%) 1.0338
0m 53s (- 17m 36s) (4800 4%) 0.9872
0m 54s (- 17m 37s) (4900 4%) 0.9865
0m 55s (- 17m 37s) (5000 5%) 0.9203
0m 57s (- 17m 42s) (5100 5%) 0.8926
0m 58s (- 17m 47s) (5200 5%) 0.9575
0m 59s (- 17m 50s) (5300 5%) 0.9738
1m 1s (- 17m 51s) (5400 5%) 0.8469
1m 2s (- 17m 58s) (5500 5%) 0.8140
1m 4s (- 18m 3s) (5600 5%) 0.8182
1m 5s (- 18m 3s) (5700 5%) 0.7970
1m 6s (- 18m 1s) (5800 5%) 0.7954
1m 7s (- 17m 59s) (5900 5%) 0.8720
1m 8s (- 17m 59s) (6000 6%) 0.9509
1m 10s (- 17m 59s) (6100 6%) 0.7458
1m 11s (- 17m 57s) (6200 6%) 0.7547
1m 12s (- 17m 58s) (6300 6%) 0.7434
1m 13s (- 18m 0s) (6400 6%) 0.7218
1m 14s (- 17m 58s) (6500 6%) 0.5513
1m 16s (- 17m 58s) (6600 6%) 0.7797
1m 17s (- 18m 4s) (6700 6%) 0.7203
1m 19s (- 18m 5s) (6800 6%) 0.7622
1m 20s (- 18m 4s) (6900 6%) 0.6791
1m 21s (- 18m 3s) (7000 7%) 0.6535
1m 22s (- 18m 5s) (7100 7%) 0.7205
1m 24s (- 18m 5s) (7200 7%) 0.6958
1m 26s (- 18m 13s) (7300 7%) 0.6917
1m 27s (- 18m 14s) (7400 7%) 0.7571
1m 28s (- 18m 13s) (7500 7%) 0.7107
1m 29s (- 18m 11s) (7600 7%) 0.5905
1m 31s (- 18m 13s) (7700 7%) 0.6321
1m 32s (- 18m 14s) (7800 7%) 0.5832
1m 33s (- 18m 13s) (7900 7%) 0.5936
1m 35s (- 18m 15s) (8000 8%) 0.6117
1m 37s (- 18m 21s) (8100 8%) 0.6204
1m 38s (- 18m 25s) (8200 8%) 0.6432
1m 39s (- 18m 24s) (8300 8%) 0.5753
1m 41s (- 18m 23s) (8400 8%) 0.5785
1m 42s (- 18m 23s) (8500 8%) 0.5571
1m 44s (- 18m 26s) (8600 8%) 0.6137
1m 46s (- 18m 36s) (8700 8%) 0.4746
1m 47s (- 18m 39s) (8800 8%) 0.6111
1m 50s (- 18m 53s) (8900 8%) 0.5273
1m 52s (- 19m 0s) (9000 9%) 0.5126
1m 54s (- 19m 1s) (9100 9%) 0.5298
1m 55s (- 19m 2s) (9200 9%) 0.5910
1m 57s (- 19m 4s) (9300 9%) 0.5111
1m 59s (- 19m 13s) (9400 9%) 0.4428
2m 1s (- 19m 17s) (9500 9%) 0.4999
2m 3s (- 19m 22s) (9600 9%) 0.4806
2m 5s (- 19m 27s) (9700 9%) 0.5871
2m 7s (- 19m 30s) (9800 9%) 0.5160
2m 9s (- 19m 35s) (9900 9%) 0.6229
2m 10s (- 19m 36s) (10000 10%) 0.5398
2m 12s (- 19m 38s) (10100 10%) 0.6497
2m 13s (- 19m 37s) (10200 10%) 0.5974
2m 15s (- 19m 36s) (10300 10%) 0.4924
2m 16s (- 19m 38s) (10400 10%) 0.4909
2m 18s (- 19m 39s) (10500 10%) 0.5640
2m 20s (- 19m 41s) (10600 10%) 0.4897
2m 21s (- 19m 44s) (10700 10%) 0.4988
2m 23s (- 19m 43s) (10800 10%) 0.5413
2m 24s (- 19m 40s) (10900 10%) 0.5353
2m 25s (- 19m 38s) (11000 11%) 0.4279
2m 26s (- 19m 36s) (11100 11%) 0.5253
2m 28s (- 19m 34s) (11200 11%) 0.4779
2m 29s (- 19m 31s) (11300 11%) 0.4906
2m 30s (- 19m 28s) (11400 11%) 0.5002
2m 31s (- 19m 25s) (11500 11%) 0.4955
2m 32s (- 19m 23s) (11600 11%) 0.4607
2m 33s (- 19m 21s) (11700 11%) 0.4950
2m 35s (- 19m 19s) (11800 11%) 0.4877
2m 36s (- 19m 17s) (11900 11%) 0.4831
2m 37s (- 19m 15s) (12000 12%) 0.4683
2m 38s (- 19m 14s) (12100 12%) 0.3873
2m 40s (- 19m 13s) (12200 12%) 0.5174
2m 41s (- 19m 12s) (12300 12%) 0.4287
2m 43s (- 19m 12s) (12400 12%) 0.4794
2m 44s (- 19m 12s) (12500 12%) 0.5395
2m 46s (- 19m 11s) (12600 12%) 0.5392
2m 47s (- 19m 13s) (12700 12%) 0.5140
2m 49s (- 19m 12s) (12800 12%) 0.5447
2m 50s (- 19m 10s) (12900 12%) 0.5946
2m 52s (- 19m 13s) (13000 13%) 0.4198
2m 54s (- 19m 15s) (13100 13%) 0.4372
2m 55s (- 19m 13s) (13200 13%) 0.4922
2m 56s (- 19m 11s) (13300 13%) 0.4843
2m 59s (- 19m 18s) (13400 13%) 0.4710
3m 1s (- 19m 23s) (13500 13%) 0.4680
3m 2s (- 19m 22s) (13600 13%) 0.5315
3m 4s (- 19m 22s) (13700 13%) 0.5375
3m 5s (- 19m 20s) (13800 13%) 0.4862
3m 7s (- 19m 18s) (13900 13%) 0.5427
3m 8s (- 19m 17s) (14000 14%) 0.3887
3m 9s (- 19m 15s) (14100 14%) 0.4617
3m 10s (- 19m 13s) (14200 14%) 0.4378
3m 12s (- 19m 13s) (14300 14%) 0.4251
3m 13s (- 19m 12s) (14400 14%) 0.4816
3m 15s (- 19m 13s) (14500 14%) 0.5405
3m 17s (- 19m 13s) (14600 14%) 0.5149
3m 18s (- 19m 13s) (14700 14%) 0.5146
3m 20s (- 19m 12s) (14800 14%) 0.4540
3m 21s (- 19m 11s) (14900 14%) 0.5449
3m 23s (- 19m 11s) (15000 15%) 0.4820
3m 24s (- 19m 10s) (15100 15%) 0.3799
3m 26s (- 19m 9s) (15200 15%) 0.4905
3m 27s (- 19m 8s) (15300 15%) 0.4813
3m 28s (- 19m 6s) (15400 15%) 0.4140
3m 29s (- 19m 3s) (15500 15%) 0.5446
3m 31s (- 19m 4s) (15600 15%) 0.5155
3m 33s (- 19m 5s) (15700 15%) 0.4819
3m 34s (- 19m 3s) (15800 15%) 0.3921
3m 35s (- 19m 1s) (15900 15%) 0.4813
3m 36s (- 18m 58s) (16000 16%) 0.4776
3m 38s (- 18m 57s) (16100 16%) 0.4415
3m 39s (- 18m 56s) (16200 16%) 0.4435
3m 40s (- 18m 54s) (16300 16%) 0.3817
3m 42s (- 18m 52s) (16400 16%) 0.4782
3m 43s (- 18m 50s) (16500 16%) 0.4090
3m 44s (- 18m 49s) (16600 16%) 0.4836
3m 46s (- 18m 47s) (16700 16%) 0.5169
3m 47s (- 18m 47s) (16800 16%) 0.4211
3m 48s (- 18m 45s) (16900 16%) 0.4901
3m 50s (- 18m 43s) (17000 17%) 0.4240
3m 51s (- 18m 41s) (17100 17%) 0.4438
3m 52s (- 18m 39s) (17200 17%) 0.5077
3m 53s (- 18m 37s) (17300 17%) 0.4263
3m 54s (- 18m 35s) (17400 17%) 0.3913
3m 56s (- 18m 33s) (17500 17%) 0.4183
3m 57s (- 18m 31s) (17600 17%) 0.3963
3m 58s (- 18m 29s) (17700 17%) 0.4108
4m 0s (- 18m 29s) (17800 17%) 0.4083
4m 1s (- 18m 28s) (17900 17%) 0.4686
4m 3s (- 18m 29s) (18000 18%) 0.4544
4m 5s (- 18m 30s) (18100 18%) 0.4831
4m 6s (- 18m 30s) (18200 18%) 0.5226
4m 8s (- 18m 28s) (18300 18%) 0.4100
4m 9s (- 18m 27s) (18400 18%) 0.4876
4m 12s (- 18m 31s) (18500 18%) 0.4229
4m 13s (- 18m 30s) (18600 18%) 0.5095
4m 15s (- 18m 28s) (18700 18%) 0.4397
4m 16s (- 18m 28s) (18800 18%) 0.4783
4m 17s (- 18m 26s) (18900 18%) 0.4967
4m 19s (- 18m 25s) (19000 19%) 0.4985
4m 20s (- 18m 24s) (19100 19%) 0.3656
4m 21s (- 18m 22s) (19200 19%) 0.4487
4m 23s (- 18m 20s) (19300 19%) 0.4748
4m 24s (- 18m 17s) (19400 19%) 0.4515
4m 25s (- 18m 15s) (19500 19%) 0.4901
4m 26s (- 18m 13s) (19600 19%) 0.4169
4m 27s (- 18m 11s) (19700 19%) 0.3527
4m 29s (- 18m 10s) (19800 19%) 0.3894
4m 30s (- 18m 9s) (19900 19%) 0.4173
4m 32s (- 18m 10s) (20000 20%) 0.4066
4m 34s (- 18m 10s) (20100 20%) 0.4299
4m 35s (- 18m 8s) (20200 20%) 0.4273
4m 37s (- 18m 7s) (20300 20%) 0.4308
4m 38s (- 18m 6s) (20400 20%) 0.3655
4m 39s (- 18m 4s) (20500 20%) 0.3752
4m 40s (- 18m 2s) (20600 20%) 0.4432
4m 42s (- 18m 1s) (20700 20%) 0.4851
4m 43s (- 18m 0s) (20800 20%) 0.3114
4m 44s (- 17m 58s) (20900 20%) 0.4381
4m 46s (- 17m 57s) (21000 21%) 0.4211
4m 47s (- 17m 55s) (21100 21%) 0.3877
4m 49s (- 17m 54s) (21200 21%) 0.2837
4m 50s (- 17m 52s) (21300 21%) 0.3812
4m 51s (- 17m 50s) (21400 21%) 0.3746
4m 52s (- 17m 49s) (21500 21%) 0.3716
4m 54s (- 17m 47s) (21600 21%) 0.4938
4m 55s (- 17m 45s) (21700 21%) 0.4438
4m 56s (- 17m 43s) (21800 21%) 0.3613
4m 57s (- 17m 41s) (21900 21%) 0.4583
4m 58s (- 17m 39s) (22000 22%) 0.3963
5m 0s (- 17m 37s) (22100 22%) 0.3438
5m 1s (- 17m 35s) (22200 22%) 0.3765
5m 2s (- 17m 34s) (22300 22%) 0.4002
5m 3s (- 17m 32s) (22400 22%) 0.3995
###Markdown
Before moving on, SAVE THE MODELS!!!
###Code
# Here's a nice bleeding-edge Python trick (requires Python 3.6 or newer)
# F-strings for the win!!
# See https://www.python.org/dev/peps/pep-0498/
print(hidden_size, batches)
print(f'encoder_vanilla_{hidden_size}_{batches}.pkl')
import pickle
# In Python >= 3.6
with open(f'encoder_vanilla_{hidden_size}_{batches}.pkl', 'wb') as fout:
pickle.dump(my_encoder, fout)
with open(f'decoder_vanilla_{hidden_size}_{batches}.pkl', 'wb') as fout:
pickle.dump(my_decoder, fout)
# For Python < 3.6
with open('encoder_vanilla_{}_{}.pkl'.format(hidden_size, batches), 'wb') as fout:
pickle.dump(my_encoder, fout)
with open('decoder_vanilla_{}_{}.pkl'.format(hidden_size, batches), 'wb') as fout:
pickle.dump(my_decoder, fout)
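# Note (added sketch, not part of the original exercise): pickling whole model objects ties
# these files to the exact class definitions above. A more portable PyTorch alternative is
# to save only the weights, e.g.:
# torch.save(my_encoder.state_dict(), f'encoder_vanilla_{hidden_size}_{batches}.pt')
# torch.save(my_decoder.state_dict(), f'decoder_vanilla_{hidden_size}_{batches}.pt')
# and later rebuild the models and restore them with load_state_dict(torch.load(...)).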
translate('kopi siew dai')
translate('Kopi gau siew dai peng')
translate('Kopi O gau')
translate('Teh poh')
translate('kopi tiloh')
translate('kopi c peng')
###Output
[0, 22, 2, 3, 5, 7, 1]
|
CNN/CNN_simple.ipynb | ###Markdown
2020 Fall Final Project CNN model 2 Data Preprocessing
###Code
import os
import random
from shutil import copy2
import keras
keras.__version__
print('number of A:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/A')))
print('number of B:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/B')))
print('number of E:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/E')))
print('number of G:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/G')))
base_dir = '/Users/lixinyi/Desktop/BIA667final/data'
train_dir = os.path.join(base_dir, 'train')
if not os.path.exists(train_dir):
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
if not os.path.exists(validation_dir):
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
if not os.path.exists(test_dir):
os.mkdir(test_dir)
train_a_dir = os.path.join(train_dir, 'A')
if not os.path.exists(train_a_dir):
os.mkdir(train_a_dir)
train_b_dir = os.path.join(train_dir, 'B')
if not os.path.exists(train_b_dir):
os.mkdir(train_b_dir)
train_e_dir = os.path.join(train_dir, 'E')
if not os.path.exists(train_e_dir):
os.mkdir(train_e_dir)
train_g_dir = os.path.join(train_dir, 'G')
if not os.path.exists(train_g_dir):
os.mkdir(train_g_dir)
test_a_dir = os.path.join(test_dir, 'A')
if not os.path.exists(test_a_dir):
os.mkdir(test_a_dir)
test_b_dir = os.path.join(test_dir, 'B')
if not os.path.exists(test_b_dir):
os.mkdir(test_b_dir)
test_e_dir = os.path.join(test_dir, 'E')
if not os.path.exists(test_e_dir):
os.mkdir(test_e_dir)
test_g_dir = os.path.join(test_dir, 'G')
if not os.path.exists(test_g_dir):
os.mkdir(test_g_dir)
validation_a_dir = os.path.join(validation_dir, 'A')
if not os.path.exists(validation_a_dir):
os.mkdir(validation_a_dir)
validation_b_dir = os.path.join(validation_dir, 'B')
if not os.path.exists(validation_b_dir):
os.mkdir(validation_b_dir)
validation_e_dir = os.path.join(validation_dir, 'E')
if not os.path.exists(validation_e_dir):
os.mkdir(validation_e_dir)
validation_g_dir = os.path.join(validation_dir, 'G')
if not os.path.exists(validation_g_dir):
os.mkdir(validation_g_dir)
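# The directory setup above could also be written compactly; a sketch of the equivalent loop
# (os.makedirs with exist_ok=True creates missing directories and skips existing ones):
# for split in ('train', 'test', 'validation'):
#     for cls in ('A', 'B', 'E', 'G'):
#         os.makedirs(os.path.join(base_dir, split, cls), exist_ok=True)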
A_dir = '/Users/lixinyi/Desktop/BIA667final/data/A'
B_dir = '/Users/lixinyi/Desktop/BIA667final/data/B'
E_dir = '/Users/lixinyi/Desktop/BIA667final/data/E'
G_dir = '/Users/lixinyi/Desktop/BIA667final/data/G'
num_A = len(os.listdir(A_dir))
num_B = len(os.listdir(B_dir))
num_E = len(os.listdir(E_dir))
num_G = len(os.listdir(G_dir))
A_all = os.listdir(A_dir)
B_all = os.listdir(B_dir)
E_all = os.listdir(E_dir)
G_all = os.listdir(G_dir)
index_list_a = list(range(num_A))
index_list_b = list(range(num_B))
index_list_e = list(range(num_E))
index_list_g = list(range(num_G))
random.shuffle(index_list_a)
random.shuffle(index_list_b)
random.shuffle(index_list_e)
random.shuffle(index_list_g)
num = 0
for i in index_list_a:
fileName = os.path.join(A_dir, A_all[i])
if num < num_A*0.6:
print(str(fileName))
copy2(fileName, train_a_dir)
elif num < num_A*0.8:
copy2(fileName, test_a_dir)
else:
copy2(fileName, validation_a_dir)
num += 1
num=0
for i in index_list_b:
fileName = os.path.join(B_dir, B_all[i])
if num < num_B*0.6:
print(str(fileName))
copy2(fileName, train_b_dir)
elif num < num_B*0.8:
copy2(fileName, test_b_dir)
else:
copy2(fileName, validation_b_dir)
num += 1
num=0
for i in index_list_e:
fileName = os.path.join(E_dir, E_all[i])
if num < num_E*0.6:
print(str(fileName))
copy2(fileName, train_e_dir)
elif num < num_E*0.8:
copy2(fileName, test_e_dir)
else:
copy2(fileName, validation_e_dir)
num += 1
num=0
for i in index_list_g:
fileName = os.path.join(G_dir, G_all[i])
if num < num_G*0.6:
print(str(fileName))
copy2(fileName, train_g_dir)
elif num < num_G*0.8:
copy2(fileName, test_g_dir)
else:
copy2(fileName, validation_g_dir)
num += 1
print('number of training a:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/train/A')))
print('number of training b:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/train/B')))
print('number of training e:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/train/E')))
print('number of training g:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/train/G')))
print('number of testing a:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/test/A')))
print('number of testing b:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/test/B')))
print('number of testing e:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/test/E')))
print('number of testing g:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/test/G')))
print('number of validation a:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/validation/A')))
print('number of validation b:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/validation/B')))
print('number of validation e:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/validation/E')))
print('number of validation g:',len(os.listdir('/Users/lixinyi/Desktop/BIA667final/data/validation/G')))
from keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'/Users/lixinyi/Desktop/BIA667final/data/train',
batch_size=32,
class_mode='categorical',
color_mode = 'grayscale')
test_generator = test_datagen.flow_from_directory(
'/Users/lixinyi/Desktop/BIA667final/data/test',
batch_size=32,
class_mode='categorical',
color_mode = 'grayscale')
validation_generator = validation_datagen.flow_from_directory(
'/Users/lixinyi/Desktop/BIA667final/data/validation',
batch_size=32,
class_mode='categorical',
color_mode = 'grayscale')
for data_batch, labels_batch in train_generator:
print('data batch shape:', data_batch.shape)
print('labels batch shape:', labels_batch.shape)
break
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
plt.subplot(2,2,1)
plt.imshow(data_batch[5].reshape(256,256),cmap='gray')
plt.subplot(2,2,2)
plt.imshow(data_batch[10].reshape(256,256),cmap='gray')
plt.subplot(2,2,3)
plt.imshow(data_batch[15].reshape(256,256),cmap='gray')
plt.subplot(2,2,4)
plt.imshow(data_batch[20].reshape(256,256),cmap='gray')
plt.show()
###Output
_____no_output_____
###Markdown
Data Augmentation
###Code
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.5,
zoom_range=0.4,
horizontal_flip=True,
fill_mode='wrap')
from keras.preprocessing import image
fnames = [os.path.join('/Users/lixinyi/Desktop/BIA667final/data/train/B', fname) for fname in os.listdir('/Users/lixinyi/Desktop/BIA667final/data/train/B')]
# We pick one image to "augment"
img_path = fnames[3]
# Read the image and resize it
img = image.load_img(img_path, target_size=(256,256))
# Convert it to a Numpy array with shape (256, 256, 3)
x = image.img_to_array(img)
# Reshape it to (1, 256, 256, 3)
x = x.reshape((1,) + x.shape)
# The .flow() command below generates batches of randomly transformed images.
# It will loop indefinitely, so we need to `break` the loop at some point!
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(image.array_to_img(batch[0]))
i += 1
if i % 3 == 0:
break
plt.show()
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=50,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.5,
zoom_range=0.4,
horizontal_flip=True,
fill_mode='wrap')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 512x512
target_size=(512,512),
batch_size=32,
# Since we use categorical_crossentropy loss, we need categorical labels
class_mode='categorical',
color_mode = 'grayscale')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(512,512),
batch_size=32,
class_mode='categorical',
color_mode = 'grayscale')
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(512,512),
batch_size=32,
class_mode='categorical',
color_mode = 'grayscale')
from keras import layers
from keras import models
from keras.layers.core import Dropout
from IPython.display import Image
from keras.utils.vis_utils import model_to_dot
model_aug = models.Sequential()
model_aug.add(layers.Conv2D(20, (5, 5), activation='relu',
input_shape=(512,512, 1)))
model_aug.add(layers.MaxPooling2D((2, 2)))
model_aug.add(layers.Conv2D(50, (5, 5), activation='relu'))
model_aug.add(layers.MaxPooling2D((2, 2)))
model_aug.add(layers.Flatten())
model_aug.add(layers.Dense(4, activation='softmax'))
model_aug.summary()
from keras import optimizers
model_aug.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
history_aug = model_aug.fit_generator(
train_generator,
steps_per_epoch=50,
epochs=20,
validation_data=validation_generator,
validation_steps=50)
acc = history_aug.history['acc']
val_acc = history_aug.history['val_acc']
loss = history_aug.history['loss']
val_loss = history_aug.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b-', label='Training acc')
plt.plot(epochs, val_acc, 'r-', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b-', label='Training loss')
plt.plot(epochs, val_loss, 'r-', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
model_aug.evaluate(test_generator)
###Output
18/18 [==============================] - 18s 1s/step
|
TradingAI/AI Algorithms in Trading/Lesson 16 - Decision Tree/Visualizing Your Tree Exercise/titanic_graphviz.ipynb | ###Markdown
How to Visualize Your Decision Tree In the previous workspace, you created a decision tree for the Titanic survival dataset. But what do you do if you want to inspect your tree visually, and make sure it makes logical sense? We'll look at how to do that in this workspace, using Graphviz open source graph visualization software. Graph visualization is a way of representing structural information as diagrams of abstract graphs and networks.We'll start by importing the same dataset, and taking the same steps we did earlier to split the data and train the tree.
###Code
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Pretty display for notebooks
%matplotlib inline
# Set a random seed
import random
random.seed(42)
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
features_raw = full_data.drop(['Survived'], axis = 1)
features = pd.get_dummies(features_raw)
features = features.fillna(0.0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, outcomes, test_size=0.2, random_state=42)
# Import the classifier from sklearn
from sklearn.tree import DecisionTreeClassifier
# TODO: Define the classifier, and fit it to the data
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
# Making predictions
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
# Calculate the accuracy
from sklearn.metrics import accuracy_score
train_accuracy = accuracy_score(y_train, y_train_pred)
test_accuracy = accuracy_score(y_test, y_test_pred)
print('The training accuracy is', train_accuracy)
print('The test accuracy is', test_accuracy)
###Output
The training accuracy is 1.0
The test accuracy is 0.810055865922
###Markdown
Now, let's install the graphviz package.
###Code
!pip install graphviz
###Output
Collecting graphviz
Downloading https://files.pythonhosted.org/packages/83/cc/c62100906d30f95d46451c15eb407da7db201e30f42008f3643945910373/graphviz-0.14-py2.py3-none-any.whl
Installing collected packages: graphviz
Successfully installed graphviz-0.14
###Markdown
Now we'll import some relevant modules.
###Code
from sklearn.tree import export_graphviz
import graphviz
###Output
_____no_output_____
###Markdown
(TODO) Display the Tree GraphNow, use what you learned to export the graph in DOT format, and finally display it in the notebook.
###Code
# TODO: Export the graph to DOT format
# TODO: Use graphviz to create the graph
# TODO: Display the graph in the Jupyter notebook
dot_data = export_graphviz(model, out_file=None,
feature_names=X_train.columns,
class_names=['Died', 'Survived'],  # class_names follow the sorted class values: 0 = Died, 1 = Survived
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph
###Output
_____no_output_____
###Markdown
This displays a rather large and detailed graph that is nonetheless helpful for making apparent a few aspects of our tree. The tree has made splits on features that aren't very useful for characterizing groups of people—for example, it's made splits based on individual people's names, which basically split out a single person. Based on your inspection of this graph, you may decide to change some of the input features and hyperparameters of your tree.In case you need help, be sure to check out the [solution notebook](titanic_graphviz_solution.ipynb).
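As a quick illustration of that kind of change, here is a minimal sketch (the dropped column names and the hyperparameter values are illustrative assumptions, not part of the original exercise) that removes identifier-like columns and caps the tree's complexity, reusing the objects defined above:
```python
# Hypothetical follow-up: drop high-cardinality identifier columns and constrain the tree
features_small = pd.get_dummies(features_raw.drop(['Name', 'Ticket', 'PassengerId'], axis=1)).fillna(0.0)
X_tr2, X_te2, y_tr2, y_te2 = train_test_split(features_small, outcomes, test_size=0.2, random_state=42)
model_small = DecisionTreeClassifier(max_depth=6, min_samples_leaf=10, random_state=42)
model_small.fit(X_tr2, y_tr2)
print('Constrained-tree test accuracy:', accuracy_score(y_te2, model_small.predict(X_te2)))
```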
###Code
import math
entropy_occupation = -(4/11 * math.log(4/11, 2) + 7/11 * math.log(7/11, 2)) # entropy target
entropy_gender = -(6/11 * math.log(6/11, 2) + 5/11 * math.log(5/11, 2))
entropy_app = -(5/11 * math.log(5/11, 2) + 3/11 * math.log(3/11, 2) + 3/11 * math.log(3/11, 2))
entropy_occupation
entropy_gender
entropy_app
entropy_occupation - entropy_app
###Output
_____no_output_____ |
Module01/01-08-Functions.ipynb | ###Markdown
FunctionsSo far in this course we've explored equations that perform algebraic operations to produce one or more results. A *function* is a way of encapsulating an operation that takes an input and produces exactly one output.For example, consider the following function definition:\begin{equation}f(x) = x^{2} + 2\end{equation}This defines a function named ***f*** that accepts one input (***x***) and returns a single value that is the result calculated by the expression $x^{2} + 2$.Having defined the function, we can use it for any input value. For example:\begin{equation}f(3) = 11\end{equation}You've already seen a few examples of Python functions, which are defined using the **def** keyword. However, the strict definition of an algebraic function is that it must return a single value. Here's an example of defining and using a Python function that meets this criterion:
###Code
# define a function to return x^2 + 2
def f(x):
return x**2 + 2
# call the function
f(3)
###Output
_____no_output_____
###Markdown
You can use functions in equations, just like any other term. For example, consider the following equation:\begin{equation}y = f(x) - 1\end{equation}To calculate a value for ***y***, we take the ***f*** of ***x*** and subtract 1. So assuming that ***f*** is defined as previously, given an ***x*** value of 4, this equation returns a ***y*** value of **17** (*f*(4) returns $4^{2} + 2$, so 16 + 2 = 18; and then the equation subtracts 1 to give us 17). Here it is in Python:
###Code
x = 4
y = f(x) - 1
print(y)
###Output
17
###Markdown
Of course, the value returned by a function depends on the input; and you can graph this with the input (let's call it ***x***) on one axis and the output (***f(x)***) on the other.
###Code
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
# Create an array of x values from -100 to 100
x = np.array(range(-100, 101))
# Set up the graph
plt.xlabel('x')
plt.ylabel('f(x)')
plt.grid()
# Plot x against f(x)
plt.plot(x,f(x), color='purple')
plt.show()
###Output
_____no_output_____
###Markdown
As you can see (if you hadn't already figured it out), our function is a *quadratic function* - it returns a squared value that results in a parabolic graph when the output for multiple input values is plotted. Bounds of a FunctionSome functions will work for any input and may return any output. For example, consider the function ***u*** defined here:\begin{equation}u(x) = x + 1\end{equation}This function simply adds 1 to whatever input is passed to it, so it will produce a defined output for any value of ***x*** that is a *real* number; in other words, any "regular" number - but not an *imaginary* number like √-1, or ∞ (infinity). You can specify the set of real numbers using the symbol ${\rm I\!R}$ (note the double stroke). The values that can be used for ***x*** can be expressed as a *set*, which we indicate by enclosing all of the members of the set in "{...}" braces; so to indicate the set of all possible values for x such that x is a member of the set of all real numbers, we can use the following expression:\begin{equation}\{x \in \rm I\!R\}\end{equation} Domain of a FunctionWe call the set of numbers for which a function can return a value its *domain*, and in this case, the domain of ***u*** is the set of all real numbers; which is actually the default assumption for most functions.Now consider the following function ***g***:\begin{equation}g(x) = (\frac{12}{2x})^{2}\end{equation}If we use this function with an ***x*** value of **2**, we would get the output **9**; because $(12 \div (2 \cdot 2))^{2}$ is 9. Similarly, if we use the value **-3** for ***x***, the output will be **4**. However, what happens when we apply this function to an ***x*** value of **0**? Anything divided by 0 is undefined, so the function ***g*** doesn't work for an ***x*** value of 0.So we need a way to denote the domain of the function ***g*** by indicating the input values for which a defined output can be returned. Specifically, we need to restrict ***x*** to a specific list of values - specifically any real number that is not 0. To indicate this, we can use the following notation:\begin{equation}\{x \in \rm I\!R\;\;|\;\; x \ne 0 \}\end{equation}This is interpreted as *Any value for x where x is in the set of real numbers such that x is not equal to 0*, and we can incorporate this into the function's definition like this:\begin{equation}g(x) = (\frac{12}{2x})^{2}, \{x \in \rm I\!R\;\;|\;\; x \ne 0 \}\end{equation}Or more simply:\begin{equation}g(x) = (\frac{12}{2x})^{2},\;\; x \ne 0\end{equation}When you plot the output of a function, you can indicate the gaps caused by input values that are not in the function's domain by plotting an empty circle to show that the function is not defined at this point:
###Code
%matplotlib inline
# Define function g
def g(x):
if x != 0:
return (12/(2*x))**2
# Plot output from function g
import numpy as np
from matplotlib import pyplot as plt
# Create an array of x values from -100 to 100
x = range(-100, 101)
# Get the corresponding y values from the function
y = [g(a) for a in x]
# Set up the graph
plt.xlabel('x')
plt.ylabel('g(x)')
plt.grid()
# Plot x against g(x)
plt.plot(x,y, color='purple')
# plot an empty circle to show the undefined point
plt.plot(0,g(0.0000001), color='purple', marker='o', markerfacecolor='w', markersize=8)
plt.show()
###Output
_____no_output_____
###Markdown
Note that the function works for every value other than 0; so the function is defined for x = 0.000000001, and for x = -0.000000001; it only fails to return a defined value for exactly 0.OK, let's take another example. Consider this function:\begin{equation}h(x) = 2\sqrt{x}\end{equation}Applying this function to a non-negative ***x*** value returns a meaningful output; but for any value where ***x*** is negative, the output is undefined.We can indicate the domain of this function in its definition like this:\begin{equation}h(x) = 2\sqrt{x}, \{x \in \rm I\!R\;\;|\;\; x \ge 0 \}\end{equation}This is interpreted as *Any value for x where x is in the set of real numbers such that x is greater than or equal to 0*.Or, you might see this in a simpler format:\begin{equation}h(x) = 2\sqrt{x},\;\; x \ge 0\end{equation}Note that the symbol ≥ is used to indicate that the value must be *greater than **or equal to*** 0; and this means that **0** is included in the set of valid values. To indicate that the value must be *greater than 0, **not including 0***, use the > symbol. You can also use the equivalent symbols for *less than or equal to* (≤) and *less than* (<).When plotting a function line that marks the end of a continuous range, the end of the line is shown as a circle, which is filled if the function includes the value at that point, and unfilled if it does not.Here's the Python to plot function ***h***:
###Code
%matplotlib inline
def h(x):
if x >= 0:
import numpy as np
return 2 * np.sqrt(x)
# Plot output from function h
import numpy as np
from matplotlib import pyplot as plt
# Create an array of x values from -100 to 100
x = range(-100, 101)
# Get the corresponding y values from the function
y = [h(a) for a in x]
# Set up the graph
plt.xlabel('x')
plt.ylabel('h(x)')
plt.grid()
# Plot x against h(x)
plt.plot(x,y, color='purple')
# plot a filled circle at the end to indicate a closed interval
plt.plot(0, h(0), color='purple', marker='o', markerfacecolor='purple', markersize=8)
plt.show()
###Output
_____no_output_____
###Markdown
Sometimes, a function may be defined for a specific *interval*; for example, for all values between 0 and 5:\begin{equation}j(x) = x + 2,\;\; x \ge 0 \text{ and } x \le 5\end{equation}In this case, the function is defined for ***x*** values between 0 and 5 *inclusive*; in other words, **0** and **5** are included in the set of defined values. This is known as a *closed* interval and can be indicated like this:\begin{equation}\{x \in \rm I\!R\;\;|\;\; 0 \le x \le 5 \}\end{equation}It could also be indicated like this:\begin{equation}\{x \in \rm I\!R\;\;|\;\; [0,5] \}\end{equation}If the condition in the function was **x > 0 and x < 5**, then the interval would be described as *open* and 0 and 5 would *not* be included in the set of defined values. This would be indicated using one of the following expressions:\begin{equation}\{x \in \rm I\!R\;\;|\;\; 0 \lt x \lt 5 \}\end{equation}\begin{equation}\{x \in \rm I\!R\;\;|\;\; (0,5) \}\end{equation}Here's function ***j*** in Python:
###Code
%matplotlib inline
def j(x):
if x >= 0 and x <= 5:
return x + 2
# Plot output from function j
import numpy as np
from matplotlib import pyplot as plt
# Create an array of x values from -100 to 100
x = range(-100, 101)
y = [j(a) for a in x]
# Set up the graph
plt.xlabel('x')
plt.ylabel('j(x)')
plt.grid()
# Plot x against j(x)
plt.plot(x, y, color='purple')
# plot a filled circle at the ends to indicate an open interval
plt.plot(0, j(0), color='purple', marker='o', markerfacecolor='purple', markersize=8)
plt.plot(5, j(5), color='purple', marker='o', markerfacecolor='purple', markersize=8)
plt.show()
###Output
_____no_output_____
###Markdown
Now, suppose we have a function like this:\begin{equation}k(x) = \begin{cases} 0, & \text{if } x = 0, \\ 1, & \text{if } x = 100\end{cases}\end{equation}In this case, the function has highly restricted domain; it only returns a defined output for 0 and 100. No output for any other ***x*** value is defined. In this case, the set of the domain is:\begin{equation}\{0,100\}\end{equation}Note that this does not include all real numbers, it only includes 0 and 100.When we use Python to plot this function, note that it only makes sense to plot a scatter plot showing the individual values returned, there is no line in between because the function is not continuous between the values within the domain.
###Code
%matplotlib inline
def k(x):
if x == 0:
return 0
elif x == 100:
return 1
# Plot output from function k
from matplotlib import pyplot as plt
# Create an array of x values from -100 to 100
x = range(-100, 101)
# Get the k(x) values for every value in x
y = [k(a) for a in x]
# Set up the graph
plt.xlabel('x')
plt.ylabel('k(x)')
plt.grid()
# Plot x against k(x)
plt.scatter(x, y, color='purple')
plt.show()
###Output
_____no_output_____
###Markdown
Range of a FunctionJust as the domain of a function defines the set of values for which the function is defined, the *range* of a function defines the set of possible outputs from the function.For example, consider the following function:\begin{equation}p(x) = x^{2} + 1\end{equation}The domain of this function is all real numbers. However, this is a quadratic function, so the output values will form a parabola; and since the function has no negative coefficient or constant, it will be an upward opening parabola with a vertex that has a y value of 1.So what does that tell us? Well, the minimum value that will be returned by this function is 1, so it's range is:\begin{equation}\{p(x) \in \rm I\!R\;\;|\;\; p(x) \ge 1 \}\end{equation}Let's create and plot the function for a range of ***x*** values in Python:
###Code
%matplotlib inline
# define a function to return x^2 + 1
def p(x):
return x**2 + 1
# Plot the function
import numpy as np
from matplotlib import pyplot as plt
# Create an array of x values from -100 to 100
x = np.array(range(-100, 101))
# Set up the graph
plt.xlabel('x')
plt.ylabel('p(x)')
plt.grid()
# Plot x against p(x)
plt.plot(x,p(x), color='purple')
plt.show()
###Output
_____no_output_____ |
openbus_19_id_al_adha.ipynb | ###Markdown
Imports and config
###Code
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import pandas as pd
import numpy as np
import partridge as ptg
import datetime
import os
import matplotlib.pyplot as plt
#import gtfs_utils as gu
#import gtfstk
import altair as alt
alt.renderers.enable('notebook')
###Output
_____no_output_____
###Markdown
Read files
###Code
import glob
rs = (pd.concat([pd.read_pickle(f, compression='gzip')
for f in glob.glob('data\\gtfs_stats_MOD_ffill\\2018-*_route_stats.pkl.gz')])
.assign(date = lambda x: pd.to_datetime(x.date))
.assign(dayofweek = lambda x: x['date'].dt.strftime('%A')))
rs.shape
rs.head().T
total_trips = (rs.set_index('date')
.resample('D').num_trips.sum()
.reset_index()
.groupby(pd.Grouper(key='date', freq='W-SUN'))
.agg(['min', 'mean', 'max', 'sum'])
.reset_index())
total_trips.columns = ['date', 'min', 'mean', 'max', 'sum']
total_trips.head()
weekday_total = (rs.set_index('date').resample('D').num_trips.sum().reset_index()
.assign(dayofweek = lambda x: x.date.dt.strftime('%A'))
.set_index('date'))
weekday_total = (weekday_total[~weekday_total.dayofweek.isin(['Friday', 'Saturday'])]
.groupby(pd.Grouper(level=0, freq='W-SUN'))
.agg(['min', 'mean', 'max', 'idxmax', 'idxmin'])
.reset_index())
weekday_total.columns = ['date', 'weekday_min', 'weekday_mean', 'weekday_max', 'max_weekday', 'min_weekday']
alltots = total_trips.merge(weekday_total)
points = alt.Chart(total_trips).mark_point(filled=True).encode(
alt.Y(
'mean',
scale=alt.Scale(zero=False),
axis=alt.Axis(title='Daily Trips')
),
x='date:T',
color=alt.value('black')
)
error_bars = alt.Chart(total_trips).mark_rule().encode(
x='date:T',
y='min',
y2='max'
)
(points + error_bars).properties(height=600, width=800)
weekday_min_color = alt.condition("datum.weekday_min != datum.min",
alt.value("#06982d"),
alt.value("#ae1325"))
rule = alt.Chart(alltots).mark_rule().encode(
alt.X(
'date:T',
),
alt.Y(
'min',
scale=alt.Scale(zero=False),
axis=alt.Axis(title='Daily Trips')
),
alt.Y2('max'),
color=alt.value("#ae1325"),
tooltip=[alt.Tooltip('date:T', format='%A, %B %e'),'min', 'max', 'mean']
)
bar = alt.Chart(alltots).mark_bar().encode(
x='date:T',
y='weekday_min',
y2='weekday_max',
color=alt.value("#ae1325"),
tooltip=[alt.Tooltip('date:T', format='%A, %B %e'), 'weekday_mean', alt.Tooltip('min_weekday:T', format='%A, %B %e'), alt.Tooltip('max_weekday:T', format='%A, %B %e')]
)
(rule + bar + points).properties(height=600, width=800)
wd_avg = rs.groupby('dayofweek').num_trips.sum() / rs.groupby('dayofweek').date.nunique()
wd_avg
eid_routes = rs[(rs.date>'2018-08-19') & (rs.date<'2018-08-25')]
week_before_routes = rs[(rs.date<='2018-08-17') & (rs.date>'2018-08-12')]
id_num_trips = eid_routes.groupby('dayofweek').num_trips.sum()
id_num_trips
week_before = week_before_routes.groupby('dayofweek').num_trips.sum()
comp = pd.concat([wd_avg, week_before, id_num_trips], axis=1, keys = ['avg', 'week_before', 'eid']).reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'])
comp[['week_before', 'eid']].plot(figsize=(15,10), kind='bar')
sums = comp.loc[['Tuesday', 'Wednesday', 'Thursday']].sum()
sums['eid'] / sums['week_before']
comp['pct_change'] = (100*(1-np.round((comp.week_before/comp.eid), 2))).astype(int)
comp
id_num_trips_zone = eid_routes.groupby(['dayofweek', 'start_zone']).num_trips.sum()
week_before_zone = week_before_routes.groupby(['dayofweek', 'start_zone']).num_trips.sum()
comp_zone = (pd.concat([week_before_zone, id_num_trips_zone], axis=1, keys = ['week_before', 'eid'])
#.stack()
.reset_index()
#.rename(columns={'level_2': 'week', 0: 'num_trips'})
)
comp_zone['pct_change'] = (-100*(1-np.round((comp_zone.eid/comp_zone.week_before), 2))).astype(int)
sorter = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
comp_zone.dayofweek = comp_zone.dayofweek.astype("category")
comp_zone.dayofweek.cat.set_categories(sorter, inplace=True)
comp_zone=comp_zone.sort_values('dayofweek')
comp_zone
alt.Chart(comp_zone[~comp_zone.dayofweek.isin(('Saturday', 'Sunday', 'Monday'))]).mark_point().encode(
x = alt.X('dayofweek:N', ),
y = 'pct_change',
color = 'start_zone:N',
tooltip = 'start_zone',
).properties(height=600, width=800)
comp_zone.sort_values('pct_change').to_csv('data/eid_comp_zone.csv')
id_num_trips_zone_inside = eid_routes[eid_routes.start_zone==eid_routes.end_zone].groupby(['dayofweek', 'start_zone', 'agency_name']).num_trips.sum()
week_before_zone_inside = week_before_routes[week_before_routes.start_zone==week_before_routes.end_zone].groupby(['dayofweek', 'start_zone', 'agency_name']).num_trips.sum()
comp_zone_inside = (pd.concat([week_before_zone_inside, id_num_trips_zone_inside], axis=1, keys = ['week_before', 'eid'])
#.stack()
.reset_index()
#.rename(columns={'level_2': 'week', 0: 'num_trips'})
)
comp_zone_inside['pct_change'] = (-100*(1-np.round((comp_zone_inside.eid/comp_zone_inside.week_before), 2)))#.astype(int)
sorter = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
comp_zone_inside.dayofweek = comp_zone_inside.dayofweek.astype("category")
comp_zone_inside.dayofweek.cat.set_categories(sorter, inplace=True)
comp_zone_inside=comp_zone_inside.sort_values('dayofweek')
comp_zone_inside.sort_values('pct_change')
comp_zone_inside.sort_values('pct_change').to_csv('data/eid_comp_zone_inside_agency.csv')
CLUSTER_TO_LINE = 'data/archive/2018-08-22/ClusterToLine.zip'
def get_cluster_to_line_df(path):
cols = ['agency_name', 'route_id', 'route_short_name',
'cluster_name', 'from_date', 'to_date', 'cluster_id',
'route_type', 'route_type_desc', 'cluster_sub_desc', 'EXTRA']
ctl = (pd.read_csv(path, encoding='windows-1255',
skiprows=[0], header=None, names = cols)
.drop(columns=['EXTRA']))
return ctl
ctl = get_cluster_to_line_df(CLUSTER_TO_LINE)
ctl.route_id = ctl.route_id.astype(str)
ctl.shape
rs.groupby('date').size().head()
ctl.head()
rs[rs.date=='2018-08-22']
crs = rs.merge(ctl[['route_id', 'cluster_name', 'route_type_desc']], on='route_id', how='left')
eid_routes = crs[crs.date>'2018-08-19']
week_before_routes = crs[(crs.date<='2018-08-19') & (crs.date>'2018-08-12')]
id_num_trips_cluster = eid_routes.groupby(['dayofweek', 'cluster_name', 'route_type_desc']).num_trips.sum()
week_before_cluster = week_before_routes.groupby(['dayofweek', 'cluster_name', 'route_type_desc']).num_trips.sum()
comp_cluster = (pd.concat([week_before_cluster, id_num_trips_cluster], axis=1, keys = ['week_before', 'eid'])
#.stack()
.reset_index()
.fillna(0)
#.rename(columns={'level_2': 'week', 0: 'num_trips'})
)
comp_cluster
comp_cluster['pct_change'] = (-100*(1-np.round((comp_cluster.eid.divide(comp_cluster.week_before, fill_value=0)), 3)))#.replace(np.inf, np.nan).dropna().astype(int)
sorter = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
comp_cluster.dayofweek = comp_cluster.dayofweek.astype("category")
comp_cluster.dayofweek.cat.set_categories(sorter, inplace=True)
comp_cluster=comp_cluster.sort_values('dayofweek')
comp_cluster
comp_cluster[comp_cluster.week_before>0].sort_values('pct_change').to_csv('data/eid_comp_cluster.csv')
alt.Chart(comp_cluster[~comp_cluster.dayofweek.isin(('Saturday', 'Sunday', 'Monday'))]).mark_point().encode(
x = alt.X('dayofweek:N', ),
y = 'pct_change',
color = 'cluster_name:N',
tooltip = 'cluster_name',
).properties(height=600, width=800)
routes_to_check = ['149','271', '300', '301', '350', '361', '369', '402',
'417', '433', '437', '470', '480', '500', '826',
'836', '845', '947']
rids_149 = ['7716', '7718']#, '7719', '7720']
rids = ['4440', '4444', '4446', '4443', '16723',
'7393', '7395', '14180', '14181', '14183', '14184', '14185',
#'13690', '7396', '7398', '7400', '7402',
'6588', '6601', '16314', '17845', '13666', '19577', '17846',
'4474', '4475',
'8151', '8153', '8162', '8164', '8165',
'17143', '6642', '16241', '16242', '16245', '6635', '16240', '16244', '6646', '6649',
'6656', '6660', '6661', '16415',
'6952', '6954',
'6966', '6967', '6969',
'7005', '7007', '7008', '7012', '7013', '7014', '7015', '7016',
'10958', '7020', '7022', '7023', '7024', '7026', '7027', '7028', '7030', '7033', '7034', '15337',
'19671', '19672',
'15123', '19731', '7072', '7083', '7081', '7079',
'13347', '7111', '7112',
'7159', '7161',
'19740', '19741', '7220', '7221', '7224', '7226', '13136', '7225'
]
(week_before_routes[week_before_routes.route_short_name==routes_to_check[0]]
.drop_duplicates(subset=['route_id', 'route_short_name', 'route_long_name'])
#.route_id.tolist()
)
rids = rids_149
eid_rtc = eid_routes[eid_routes.route_id.isin(rids)]
wb_rtc = week_before_routes[week_before_routes.route_id.isin(rids)]
id_num_trips_zone = eid_rtc.groupby(['dayofweek', 'route_short_name']).num_trips.sum()
week_before_zone = wb_rtc.groupby(['dayofweek', 'route_short_name']).num_trips.sum()
comp_route = (pd.concat([week_before_zone, id_num_trips_zone], axis=1, keys = ['week_before', 'eid'])
#.stack()
.reset_index()
#.rename(columns={'level_2': 'week', 0: 'num_trips'})
)
comp_route['pct_change'] = (-100*(1-np.round((comp_route.eid/comp_route.week_before), 2))).astype(int)
sorter = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
comp_route.dayofweek = comp_route.dayofweek.astype("category")
comp_route.dayofweek.cat.set_categories(sorter, inplace=True)
comp_route=comp_route.sort_values('dayofweek')
comp_route[comp_route['pct_change']<0]
ts = (pd.concat([pd.read_pickle(f, compression='gzip')
for f in glob.glob('data\\gtfs_stats_MOD_ffill\\2018-08-[1-2]?_trip_stats.pkl.gz')])
.assign(date = lambda x: pd.to_datetime(x.date))
.assign(dayofweek = lambda x: x['date'].dt.strftime('%A')))
eid_trips = ts[(ts.date>'2018-08-19') & (ts.date<'2018-08-25') & (ts.route_id.isin(rids))]
week_before_trips = ts[(ts.date<='2018-08-17') & (ts.date>'2018-08-12') & (ts.route_id.isin(rids))]
m = week_before_trips[['dayofweek', 'start_time', 'date']].merge(eid_trips[['dayofweek', 'start_time', 'date']], how='outer', on=['start_time', 'dayofweek'])
m['hour'] = m.start_time.str.split(':').apply(lambda x: int(x[0]))
m['week_before'] = m.date_x.notna()
m['eid'] = m.date_y.notna()
m.loc[(m.dayofweek=='Tuesday') & ((m.hour>=13) & (m.hour<=19))].sort_values('start_time').drop(['dayofweek', 'date_x', 'date_y', 'hour'], axis=1)
m['di'] = m.dayofweek.replace({'Monday': 2, 'Tuesday': 3, 'Wednesday': 4, 'Thursday': 5, 'Friday': 6})
m.sort_values(['di', 'start_time']).drop(['di', 'date_x', 'date_y', 'hour'], axis=1).to_csv('data/eid_149.csv', index=False)
alt.Chart(comp_route[~comp_route.dayofweek.isin(('Saturday', 'Sunday', 'Monday'))]).mark_point().encode(
x = alt.X('dayofweek:N', ),
y = 'pct_change',
color = 'route_short_name:N',
tooltip = 'route_short_name',
).properties(height=600, width=800)
id_num_trips_agency = eid_routes.groupby(['dayofweek', 'agency_name']).num_trips.sum()
week_before_agency = week_before_routes.groupby(['dayofweek', 'agency_name']).num_trips.sum()
comp_agency = (pd.concat([week_before_agency, id_num_trips_agency], axis=1, keys = ['week_before', 'eid'])
#.stack()
.reset_index()
#.rename(columns={'level_2': 'week', 0: 'num_trips'})
)
comp_agency['pct_change'] = (-100*(1-np.round((comp_agency.eid/comp_agency.week_before), 2))).astype(int)
sorter = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
comp_agency.dayofweek = comp_agency.dayofweek.astype("category")
comp_agency.dayofweek.cat.set_categories(sorter, inplace=True)
comp_agency=comp_agency.sort_values('dayofweek')
comp_agency
alt.Chart(comp_agency[~comp_agency.dayofweek.isin(('Saturday', 'Sunday', 'Monday', 'Friday'))]).mark_bar().encode(
#color = 'dayofweek:N',
y = 'mean(pct_change)',
x = 'agency_name:N',
tooltip = ['agency_name'],
).properties(height=600, width=800)
###Output
_____no_output_____ |
machine_learning_pills/01_supervised/07_fraud_detection.ipynb | ###Markdown
Credit Card Fraud Detection Imbalanced Datasets[Image Credits](https://www.techexplorist.com/false-positive-reduction-credit-card-fraud-detection/17191/) Goal of the notebookThe goal of this notebook is precisely the detection of credit card fraud in a heavily imbalanced dataset.When it comes to detecting fraud, failures, or rare events in general, our datasets will probably be very imbalanced, i.e. one class will be far more represented than the others.In that case our estimator might fail to find any pattern and classify every observation into the same class. To work around this problem the following techniques can be used:* **undersampling**: remove many observations of the most represented class so as to balance the dataset. Recommended when you have many observations.* **oversampling**: duplicate the observations of the rare-event class to balance the dataset. Personally I do not recommend this strategy: it introduces a lot of noise that can lead to overfitting. If you really cannot apply undersampling, my advice is to oversample while only *mitigating* the imbalance, e.g. if the ratio is 99% - 1%, first try to bring it to 90% - 10%.[Image Credits](https://towardsdatascience.com/having-an-imbalanced-dataset-here-is-how-you-can-solve-it-1640568947eb)To apply these transformations in Python there is a library, [imblearn](https://imbalanced-learn.readthedocs.io/en/stable/api.html), which provides many kinds of resampling. In our case we will apply a purely random balancing of the observations, so we will not use it.Logistic regression will be used as the estimator.[Image Credits](http://dataaspirant.com/2017/03/02/how-logistic-regression-model-works/) The DatasetThe dataset used here was generated with Paysim, a credit card transaction simulator.For GitHub storage reasons I only load a small part of it; [the full dataset can be downloaded from Kaggle](https://www.kaggle.com/ntnu-testimon/paysim1).The dataset fields are:* step: simulated unit of time* type: the type of transaction. CASH-IN, CASH-OUT, DEBIT, PAYMENT and TRANSFER.* amount: amount of the transaction* nameOrig: customer who initiated the transaction* oldbalanceOrg: initial balance at the time of the transaction* newbalanceOrig: balance after the transaction* nameDest: recipient of the transaction* oldbalanceDest: initial balance of the recipient* newbalanceDest: balance of the recipient after the transaction* isFraud: binary variable labelling fraud (1) and regular transactions (0)* isFlaggedFraud: binary variable flagging anomalies; here an anomaly means a movement of more than 200K in a single transaction. Loading the dataset***If you are running the notebook on Colab run the next two cells, otherwise go straight to the *read_csv* load, providing the path of your *Paysim.csv* file***
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
link = 'https://drive.google.com/YOURPATH'
fluff, id = link.split('=')
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('Paysim.csv')
df = pd.read_csv("Paysim.csv")
df.head()
###Output
_____no_output_____
###Markdown
Dataset analysisFirst of all, let's look at the percentage of fraud in the dataset
###Code
fraud_count = df.isFraud.value_counts()
print("Not Fraud: ", fraud_count[0])
print("Fraud: ", fraud_count[1])
print("\n")
print("Percentuale frodi: ", round(fraud_count[1]/fraud_count.sum())*100,2, "%")
###Output
_____no_output_____
###Markdown
The number of frauds is extremely low, but fortunately we have enough cases to apply undersampling.Let's start by noting that some variables are not informative for us and can safely be removed:* step* nameOrig* nameDest
###Code
df.drop(columns = ['step', 'nameOrig', 'nameDest'], inplace = True)
###Output
_____no_output_____
###Markdown
Variable analysisExercise. Try to find hidden relationships in the data.* Influence on the target variable.* Relationships between the regressors.* Graphical analysisIn class we will see whether the proposed solutions give the model better performance. Preprocessing Missing ValuesThere are no missing values (the dataset comes from a generator).
###Code
df.info()
###Output
_____no_output_____
###Markdown
TransformationsWe have three types of variables:* binary* continuous numeric* categoricalFor the binary variable there is no transformation to apply.For the numeric variables we will apply a normalization to bring them all into the range [0,1].Let's study the categorical variable to understand which transformation to apply.
###Code
df.type.value_counts()
###Output
_____no_output_____
###Markdown
*type* has 4 values, so we can apply One-Hot Encoding.To apply the transformations we will use a tool that makes the code reusable and elegant, the [ColumnTransformer](https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html)
###Code
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.compose import ColumnTransformer
# choose the attributes
cat_attribs = ["type"]
num_attribs = list(df._get_numeric_data().columns[0:-2]) # take the names of all numeric columns, except the last two
###Output
_____no_output_____
###Markdown
The ColumnTransformer is simply a list of operations to perform; each element of the list consists of:* the name of the step* the transformation to apply* the columns to apply it toIts full integration with scikit-learn means it has the same methods we have seen so far:* fit* transform* fit_transformand it is enough to apply it to the whole dataset, which greatly reduces the chance of errors due to code duplication.
###Code
transform = ColumnTransformer([
("num", MinMaxScaler(), num_attribs),
("cat", OneHotEncoder(), cat_attribs),
])
df_prepared = transform.fit_transform(df)
df_prepared[0]
###Output
_____no_output_____
###Markdown
The transformer returns a matrix, which we need to convert back into a DataFrame to apply the next operations.
###Code
df_complete = pd.concat([pd.DataFrame(df_prepared), df[["isFraud", "isFlaggedFraud"]]], axis =1 )
df_complete.head()
# exercise: give the columns their proper names
###Output
_____no_output_____
###Markdown
Modelling: imbalanced dataset* baseline model* logistic regression
###Code
from sklearn.model_selection import train_test_split
X = df_complete.drop("isFraud", axis=1).copy()
y = df_complete["isFraud"]
###Output
_____no_output_____
###Markdown
Since the dataset is heavily imbalanced, we apply stratified sampling on the target variable so as to avoid the train (or test) set containing no fraud at all.
###Code
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size = 0.3, stratify = y, random_state = 42)
###Output
_____no_output_____
###Markdown
Baseline modelThe baseline model is our starting point; in this case we want a model that always predicts the same label. Its accuracy will be very high, but what we care about is comparing on other metrics:* precision* recall
###Code
from sklearn.dummy import DummyClassifier
dc = DummyClassifier(strategy = "most_frequent")
dc.fit(train_x, train_y)
yhat_dc = dc.predict(test_x)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(train_x, train_y)
yhat_lr = lr.predict(test_x)
###Output
_____no_output_____
###Markdown
Evaluation
###Code
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, classification_report
print("BASELINE MODEL", "\n")
print(confusion_matrix(test_y, yhat_dc))
print("\n")
print("Accuracy: ", accuracy_score(test_y, yhat_dc))
print("Precision: ", precision_score(test_y, yhat_dc))
print("Recall: ", recall_score(test_y, yhat_dc))
print("\n")
print(classification_report(test_y, yhat_dc))
print("LOGISTIC REGRESSION", "\n")
print(confusion_matrix(test_y, yhat_lr))
print("\n")
print("Accuracy: ", accuracy_score(test_y, yhat_lr))
print("Precision: ", precision_score(test_y, yhat_lr))
print("Recall: ", recall_score(test_y, yhat_lr))
print("\n")
print(classification_report(test_y, yhat_lr))
###Output
_____no_output_____
###Markdown
Balancing the datasetWe will perform a completely random undersampling; the steps are:* split the dataset according to the target variable* create a new dataset with observations drawn at random from the larger subset so that it has the same number of observations as the less represented class* concatenate**N.B. For this exercise we balanced perfectly; in the real world you are more likely to end up using a different percentage**
###Code
not_faud = df_complete[df_complete["isFraud"]==0].copy()
fraud = df_complete[df_complete["isFraud"]==1].copy()
not_fraud_under = not_faud.sample(fraud.shape[0], replace=False, random_state=42)  # sample without replacement so the kept majority-class rows are all distinct
balanced_dataset = pd.concat([not_fraud_under, fraud], axis = 0)
balanced_dataset.isFraud.value_counts()
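# For reference only (not used in this notebook): the imblearn library mentioned earlier
# offers the same random undersampling as a one-liner; a minimal sketch, assuming imblearn
# is installed:
# from imblearn.under_sampling import RandomUnderSampler
# X_res, y_res = RandomUnderSampler(random_state=42).fit_resample(X, y)
# and an oversampler that only mitigates the imbalance (e.g. minority ~10% of majority):
# from imblearn.over_sampling import RandomOverSampler
# X_res, y_res = RandomOverSampler(sampling_strategy=0.1, random_state=42).fit_resample(X, y)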
###Output
_____no_output_____
###Markdown
let's extract the train and test sets; in this case we won't need to stratify
###Code
X_bal = balanced_dataset.drop("isFraud", axis=1).copy()
y_bal = balanced_dataset["isFraud"]
balanced_train_x, balanced_test_x, balanced_train_y, balanced_test_y = train_test_split(X_bal, y_bal, test_size = 0.3, random_state = 42)
###Output
_____no_output_____
###Markdown
Modelling: balanced dataset* baseline model* logistic regression
###Code
# change the baseline classifier strategy: no longer most_frequent, since the classes are now balanced
dc_b = DummyClassifier()
dc_b.fit(balanced_train_x, balanced_train_y)
yhat_dc_b = dc_b.predict(balanced_test_x)
lr_b = LogisticRegression()
lr_b.fit(balanced_train_x, balanced_train_y)
yhat_lr_b = lr.predict(balanced_test_x)
###Output
_____no_output_____
###Markdown
Evaluation
###Code
print("BASELINE MODEL", "\n")
print(confusion_matrix(balanced_test_y, yhat_dc_b))
print("\n")
print("Accuracy: ", accuracy_score(balanced_test_y, yhat_dc_b))
print("Precision: ", precision_score(balanced_test_y, yhat_dc_b))
print("Recall: ", recall_score(balanced_test_y, yhat_dc_b))
print("\n")
print(classification_report(balanced_test_y, yhat_dc_b))
print("LOGISTIC REGRESSION", "\n")
print(confusion_matrix(balanced_test_y, yhat_lr_b))
print("\n")
print("Accuracy: ", accuracy_score(balanced_test_y, yhat_lr_b))
print("Precision: ", precision_score(balanced_test_y, yhat_lr_b))
print("Recall: ", recall_score(balanced_test_y, yhat_lr_b))
print("\n")
print(classification_report(balanced_test_y, yhat_lr_b))
###Output
_____no_output_____ |
Tools/PostProcessing/plot_nci_growth_rate.ipynb | ###Markdown
OverviewThis notebook calculates and plots: * $E_z$ field distribution at time_start and time_end;* total EM energy evolution in time;* NCI growth rate, $Im( \omega )/\omega_{p,r}$, calculated between time_start and time_end (within the linear growth of the total energy).
###Code
import matplotlib.pyplot as plt
from scipy.constants import c
import numpy as np
import scipy.constants as scc
import yt ; yt.funcs.mylog.setLevel(50)
import glob
%matplotlib inline
###Output
_____no_output_____
###Markdown
Plot NCI growth rate
###Code
path_wx = 'path to diags folder'
file_list_warpx = glob.glob(path_wx + 'diag1?????')
iterations_warpx = [ int(file_name[len(file_name)-5:]) for file_name in file_list_warpx ]
def calculate_parameters(path):
iteration=200
dsx = yt.load( path + 'diag1%05d/' %iteration )
dxx = dsx.domain_width/dsx.domain_dimensions
dx=dxx[0];
dx = 1.*dx.ndarray_view()
dz=dxx[1];
dz = 1.*dz.ndarray_view()
cell_volume_x = np.prod(dxx)
ds1 = yt.load(path+'/diag100100/')
ds2 = yt.load(path+'/diag100200/')
cur_t1 = ds1.current_time
cur_t2 = ds2.current_time
cur_t2.to_ndarray
dt = (cur_t2-cur_t1)/100
dt = 1.*dt.ndarray_view();
return dx, dz, dt
dx, dz, dt = calculate_parameters(path_wx)
print(dx,dz,dt)
def get_fourier_transform_wx( path, fieldcomp,
iteration, plot=False, remove_last=True ):
"""
Calculate the Fourier transform of the field at a given iteration
"""
ds = yt.load(path + '/diag1%05d/' %iteration )
grid = ds.index.grids[0]
F = grid[fieldcomp]
F = F.ndarray_view()
if remove_last:
F = F[:-1,:-1]
F = F[:,:,0]
kxmax = np.pi/dx
kzmax = np.pi/dz
Nx = F.shape[0]
Nz = F.shape[1]
spectralF = np.fft.fftshift( np.fft.fft2(F) )[int(Nx/2):, int(Nz/2):]
if plot:
plt.imshow( np.log(abs(spectralF)), origin='lower',
extent=[0, kxmax, 0, kzmax] )
plt.colorbar()
return( spectralF, kxmax, kzmax )
def growth_rate_between_wx( path, iteration1, iteration2,
remove_last=False, threshold=-13 ):
"""
Calculate the difference in spectral amplitude between two iterations,
return the growth rate
"""
spec1, kxmax, kzmax = \
get_fourier_transform_wx( path, 'Ez', iteration=iteration1, remove_last=remove_last )
spec1 = np.where( abs(spec1) > np.exp(threshold), spec1, np.exp(threshold) )
spec2, kxmax, kzmax = \
get_fourier_transform_wx( path, 'Ez', iteration=iteration2, remove_last=remove_last )
spec2 = np.where( abs(spec2) > np.exp(threshold), spec2, np.exp(threshold) )
diff_growth = np.log( abs(spec2) ) - np.log( abs(spec1) )
diff_time = (iteration2-iteration1)*dt;
growth_rate = diff_growth/diff_time/c;
return( growth_rate, [0, kxmax, 0, kzmax] )
def energy( ts ):
Ex= ts.index.grids[0]['boxlib', 'Ex'].squeeze().v
Ey= ts.index.grids[0]['boxlib', 'Ey'].squeeze().v
Ez= ts.index.grids[0]['boxlib', 'Ez'].squeeze().v
    Bx= ts.index.grids[0]['boxlib', 'Bx'].squeeze().v
    By= ts.index.grids[0]['boxlib', 'By'].squeeze().v
    Bz= ts.index.grids[0]['boxlib', 'Bz'].squeeze().v
energyE = scc.epsilon_0*np.sum(Ex**2+Ey**2+Ez**2)
energyB= np.sum(Bx**2+By**2+Bz**2)/scc.mu_0
energy = energyE + energyB
return energy
energy_list = []
for iter in iterations_warpx:
path = path_wx + '/diag1%05d/' %iter
ds = yt.load( path)
energy_list.append( energy(ds) )
iteration_start = 1700
iteration_end= 1900
iter_delta = iterations_warpx[2] - iterations_warpx[1]
ds_start = yt.load(path_wx + 'diag1%05d/' %iteration_start)
Ez_start= ds_start.index.grids[0]['boxlib', 'Ez'].squeeze().v
ds_end = yt.load(path_wx + 'diag1%05d/' %iteration_end)
Ez_end= ds_end.index.grids[0]['boxlib', 'Ez'].squeeze().v
gr_wx, extent = growth_rate_between_wx(path_wx, iteration_start, iteration_end )
fs = 13
vmax = 0.05
vmin=-vmax
cmap_special = 'bwr'
fig, (ax1, ax2) = plt.subplots( ncols=2, figsize=(10,4.) )
fs=14
cmap = "viridis"
aspect='auto'
img1 = ax1.imshow(Ez_start,aspect=aspect, cmap=cmap, extent=extent)
ax1.set_title('$t_{step}=$%i' % iteration_start, size=fs)
ax1.set_xlabel(' $k_{p,r} z$ ',size=fs)
ax1.set_ylabel(' $k_{p,r} x $ ',size=fs)
fig.colorbar(img1, ax=ax1, label = '$E_z, [V/m]$')
img2 = ax2.imshow(Ez_end,aspect=aspect, cmap=cmap, extent=extent)
ax2.set_title('$t_{step}=$%i' % iteration_end,size=fs)
ax2.set_xlabel(' $k_{p,r} z$ ',size=fs)
ax2.set_ylabel(' $k_{p,r} x $ ',size=fs)
fig.colorbar(img2, ax=ax2, label = '$E_z, [V/m]$')
plt.tight_layout()
fig, (ax1, ax2) = plt.subplots( ncols=2,nrows=1, figsize=(13,5.) )
fs=14
img1 = ax1.semilogy(iterations_warpx,energy_list)
ax1.semilogy(iteration_start,energy_list[iteration_start//iter_delta],'ro')
ax1.semilogy(iteration_end,energy_list[iteration_end//iter_delta],'ro')
ax1.grid()
ax1.legend()
ax1.set_xlabel('time step',size=fs)
ax1.set_ylabel('Total EM energy',size=fs)
img2 = ax2.imshow( gr_wx, origin='lower' ,cmap='bwr', vmax=0.05, vmin=-vmax, interpolation='nearest', extent=[0, 1, 0, 1] )
ax2.set_title('NCI growth rate',size=fs)
ax2.set_xlabel('$k_{p,r} z$ ',size=fs)
ax2.set_ylabel('$k_{p,r} x $ ',size=fs)
fig.colorbar(img2, ax=ax2, label = '$Im(\omega)/\omega_{p,r}$')
###Output
_____no_output_____ |
tf_quant_finance/examples/jupyter_notebooks/Black_Scholes_Price_and_Implied_Vol.ipynb | ###Markdown
Copyright 2019 Google LLC.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Black Scholes: Price and Implied Vol in TFF Run in Google Colab View source on GitHub
###Code
#@title Upgrade to TensorFlow 2.1+
!pip install --upgrade tensorflow
#@title Install TF Quant Finance
!pip install tf-quant-finance
#@title Imports
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import tf_quant_finance as tff
option_price = tff.black_scholes.option_price
implied_vol = tff.black_scholes.implied_vol
from IPython.core.pylabtools import figsize
figsize(21, 14) # better graph size for Colab
###Output
_____no_output_____
###Markdown
Black Scholes pricing and implied volatility usageHere we see how to price vanilla options in the Black Scholes framework using the library. Semantics of the interfaceIf $S$ is the spot price of an asset, $r$ the risk free rate, $T$ the time to expiry, and $\sigma$ the volatility, then the price of a call $C$ under the [Black Scholes](https://en.wikipedia.org/wiki/Black%E2%80%93Scholes_modelBlack%E2%80%93Scholes_formula) model exhibits the following relationship (suppressing unused notation):$C(S, r) = e^{-rT} C(e^{rT}S, 0)$where $e^{-rT}$ is the discount factor and $e^{rT}S$ the forward price of the asset at expiry. The `tff` interface is framed in terms of forward prices and discount factors (rather than spot prices and risk free rates). This corresponds to the right hand side of the above relationship. ParallelismNote that the library allows pricing of options in parallel: each argument (such as the `strikes`) is an array and each index corresponds to an independent option to price. For example, this allows the simultaneous pricing of the same option with different expiry dates, or strike prices, or both.
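As a quick sanity check of this relationship (a sketch with made-up numbers, separate from the batched example in the next cell), pricing with a discount factor should equal the discount factor times the undiscounted price at the same forward:
```
# hypothetical one-off check of C(S, r) = e^{-rT} C(e^{rT} S, 0)
import numpy as np
d = np.exp(-0.05 * 1.0)   # discount factor for r = 5%, T = 1
undiscounted = option_price(volatilities=np.array([0.3]), strikes=np.array([1.0]),
                            expiries=np.array([1.0]), forwards=np.array([1.1]))
discounted = option_price(volatilities=np.array([0.3]), strikes=np.array([1.0]),
                          expiries=np.array([1.0]), forwards=np.array([1.1]),
                          discount_factors=np.array([d]))
# expected: discounted ≈ d * undiscounted
```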
###Code
# Calculate discount factors (e^-rT)
rate = 0.05
expiries = np.array([0.5, 1.0, 2.0, 1.3])
discount_factors = np.exp(-rate * expiries)
# Current value of assets.
spots = np.array([0.9, 1.0, 1.1, 0.9])
# Forward value of assets at expiry.
forwards = spots / discount_factors
# Strike prices given by:
strikes = np.array([1.0, 2.0, 1.0, 0.5])
# Indicate whether options are call (True) or put (False)
is_call_options = np.array([True, True, False, False])
# The volatilites at which the options are to be priced.
volatilities = np.array([0.7, 1.1, 2.0, 0.5])
# Calculate the prices given the volatilities and term structure.
prices = option_price(
volatilities=volatilities,
strikes=strikes,
expiries=expiries,
forwards=forwards,
discount_factors=discount_factors,
is_call_options=is_call_options)
prices
###Output
_____no_output_____
###Markdown
We now show how to invert the Black Scholes pricing model in order to recover the volatility which generated a given market price under a particular term structure. Again, the implied volatility interface operates on batches of options, with each index of the arrays corresponding to an independent problem to solve.
###Code
# Initial positions for finding implied vol.
initial_volatilities = np.array([2.0, 0.5, 2.0, 0.5])
# Identifier whether the option is call (True) or put (False)
is_call_options = np.array([True, True, False, False])
# Find the implied vols beginning at initial_volatilities.
implied_vols = implied_vol(
prices=prices,
strikes=strikes,
expiries=expiries,
forwards=forwards,
discount_factors=discount_factors,
is_call_options=is_call_options,
initial_volatilities=initial_volatilities,
validate_args=True,
tolerance=1e-9,
max_iterations=200,
name=None,
dtype=None)
implied_vols
###Output
_____no_output_____
###Markdown
Which should show that `implied_vols` is very close to the `volatilities` used to generate the market prices. Here we provided initial starting positions, however, by default `tff` will choose an adaptive initialisation position as discussed below. Black Scholes implied volatility convergence regionWe now look at some charts which provide a basic illustration of the convergence region of the implemented root finding method.The library provides an implied volatility root finding method. If not provided with an initial starting point, a starting point will be found using the Radiocic-Polya approximation [1] to the implied volatility. This section illustrates both call styles and the comparative advantage of using targeted initialisation.In this example:* Forward prices are fixed at 1.* Strike prices are from a uniform grid on (0, 5).* Expiries are fixed at 1.* Volatilities are from a uniform grid on (0, 5).* Fixed initial volatilities (where used) are 1.* Option prices were computed by tff.black_scholes.option_price on the other data.* Discount factors are 1.[1] Dan Stefanica and Rados Radoicic. [*An explicit implied volatility formula.*](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2908494) International Journal of Theoretical and Applied Finance. Vol. 20, no. 7, 2017.
###Code
#@title Example data on a grid.
def grid_data(strike_vec, vol_vec, dtype=np.float64):
"""Construct dummy data with known ground truth.
For a grid of known strikes by volatilities, return the price.
Assumes the forward prices and expiries are fixed at unity.
Args:
strikes: a vector of strike prices from which to form the grid.
volatilities: a vector of volatilities from which to form the grid.
dtype: a numpy datatype for the element values of returned arrays.
Returns:
(forwards, strikes, expiries, true_volatilities, prices) all of
which are identically shaped numpy arrays.
"""
nstrikes = len(strike_vec)
nvolatilities = len(vol_vec)
vol_ones = np.matrix(np.ones((1, nvolatilities)))
strike_ones = np.matrix(np.ones((nstrikes, 1)))
strikes = np.array(np.matrix(strike_vec).T * vol_ones, dtype=dtype)
volatilities = np.array(strike_ones * np.matrix(vol_vec), dtype=dtype)
expiries = np.ones_like(strikes, dtype=dtype)
forwards = np.ones_like(strikes, dtype=dtype)
initials = np.ones_like(strikes, dtype=dtype)
prices = option_price(volatilities=volatilities,
strikes=strikes,
expiries=expiries,
forwards=forwards,
dtype=tf.float64)
return (forwards, strikes, expiries, volatilities, initials, prices)
# Build a 1000 x 1000 grid of options find the implied volatilities of.
nstrikes = 1000
nvolatilities = 1000
strike_vec = np.linspace(0.0001, 5.0, nstrikes)
vol_vec = np.linspace(0.0001, 5.0, nvolatilities)
max_iterations = 50
grid = grid_data(strike_vec, vol_vec)
forwards0, strikes0, expiries0, volatilities0, initials0, prices0 = grid
initials0 = discounts0 = signs0 = np.ones_like(prices0)
# Implied volitilities, starting the root finder at 1.
implied_vols_fix = implied_vol(
prices=prices0,
strikes=strikes0,
expiries=expiries0,
forwards=forwards0,
initial_volatilities=initials0,
validate_args=False,
tolerance=1e-8,
max_iterations=max_iterations)
# Implied vols starting the root finder at the Radiocic-Polya approximation.
implied_vols_polya = implied_vol(
prices=prices0,
strikes=strikes0,
expiries=expiries0,
forwards=forwards0,
validate_args=False,
tolerance=1e-8,
max_iterations=max_iterations)
#@title Visualisation of accuracy
plt.clf()
thinner = 100
fig, _axs = plt.subplots(nrows=1, ncols=2)
fig.subplots_adjust(hspace=0.3)
axs = _axs.flatten()
implied_vols = [implied_vols_fix, implied_vols_polya]
titles = ["Fixed initialisation implied vol minus true vol", "Radiocic-Polya initialised implied vol minus true vol"]
vmin = np.min(list(map(np.min, implied_vols)))
vmax = np.max(list(map(np.max, implied_vols)))
images = []
for i in range(2):
_title = axs[i].set_title(titles[i])
_title.set_position([.5, 1.03])
im = axs[i].imshow(implied_vols[i] - volatilities0, origin="lower", interpolation="none", cmap="seismic", vmin=-1.0, vmax=1.0)
images.append(im)
axs[i].set_xticks(np.arange(0, len(vol_vec), thinner))
axs[i].set_yticks(np.arange(0, len(strike_vec), thinner))
axs[i].set_xticklabels(np.round(vol_vec[0:len(vol_vec):thinner], 3))
axs[i].set_yticklabels(np.round(strike_vec[0:len(strike_vec):thinner], 3))
plt.colorbar(im, ax=axs[i], fraction=0.046, pad=0.00)
axs[i].set_ylabel('Strike')
axs[i].set_xlabel('True vol')
plt.show()
pass
###Output
_____no_output_____
###Markdown
Where the grey values represent `nan`s in the grid. Note that the bottom left corner of each image lies outside the bounds where inversion should be possible. The pattern of `nan` values for different values of a fixed initialisation strategy will be different (rerun the colab to see). Black Scholes implied volatility initialisation strategy accuracy comparisonWe can also consider the median absolute error for fixed versus Radiocic-Polya initialisation of the root finder. We consider a clipped grid looking at performance away from the boundaries where extreme values or nans might occur.
###Code
# Indices for selecting the middle of the grid.
vol_slice = np.arange(int(0.25*len(vol_vec)), int(0.75*len(vol_vec)))
strike_slice = np.arange(int(0.25*len(strike_vec)), int(0.75*len(strike_vec)))
error_fix = implied_vols_fix.numpy() - volatilities0
error_fix_sub = [error_fix[i, j] for i, j in zip(strike_slice, vol_slice)]
# Calculate the median absolute error in the central portion of the the grid
# for the fixed initialisation.
median_error_fix = np.median( np.abs(error_fix_sub) )
median_error_fix
error_polya = implied_vols_polya.numpy() - volatilities0
error_polya_sub = [error_polya[i, j] for i, j in zip(strike_slice, vol_slice)]
# Calculate the median absolute error in the central portion of the the grid
# for the Radiocic-Polya approximation.
median_error_polya = np.median( np.abs(error_polya_sub) )
median_error_polya
median_error_fix / median_error_polya
###Output
_____no_output_____
###Markdown
Copyright 2019 Google LLC.Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Black Scholes: Price and Implied Vol in TFF Run in Google Colab View source on GitHub
###Code
#@title Install TF Quant Finance
!pip install tf-quant-finance
#@title Imports
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
import tf_quant_finance as tff
option_price = tff.volatility.black_scholes.option_price
implied_vol = tff.volatility.implied_vol.implied_vol
from IPython.core.pylabtools import figsize
figsize(21, 14) # better graph size for Colab
###Output
_____no_output_____
###Markdown
Black Scholes pricing and implied volatility usageHere we see how to price vanilla options in the Black Scholes framework using the library. Semantics of the interfaceIf $S$ is the spot price of an asset, $r$ the risk free rate, $T$ the time to expiry, $\sigma$ the volatility. The price of a call $C$ under [Black Scholes](https://en.wikipedia.org/wiki/Black%E2%80%93Scholes_modelBlack%E2%80%93Scholes_formula) model exhibits the following relationship (suppressing unusued notation):$C(S, r) = e^{-rT} C(e^{rT}S, 0)$Where $e^{-rT}$ is the discount factor, and $e^{rT}S_t$ the forward price of the asset to expiry. The `tff`'s interface is framed in terms of forward prices and discount factors (rather than spot prices and risk free rates). This corresponds to the right hand side of the above relationship. ParallelismNote that the library allows pricing of options in parallel: each argument (such as the `strikes`) is an array and each index corresponds to an independent option to price. For example, this allows the simultaneous pricing of the same option with different expiry dates, or strike prices or both.
###Code
# Calculate discount factors (e^-rT)
rate = 0.05
expiries = np.array([0.5, 1.0, 2.0, 1.3])
discount_factors = np.exp(-rate * expiries)
# Current value of assets.
spots = np.array([0.9, 1.0, 1.1, 0.9])
# Forward value of assets at expiry.
forwards = spots / discount_factors
# Strike prices given by:
strikes = np.array([1.0, 2.0, 1.0, 0.5])
# Indicate whether options are call (True) or put (False)
is_call_options = np.array([True, True, False, False])
# The volatilites at which the options are to be priced.
volatilities = np.array([0.7, 1.1, 2.0, 0.5])
# Calculate the prices given the volatilities and term structure.
prices = option_price(
forwards,
strikes,
volatilities,
expiries,
discount_factors=discount_factors,
is_call_options=is_call_options)
prices
###Output
_____no_output_____
###Markdown
We now show how to invert the Black Scholes pricing model in order to recover the volatility which generated a given market price under a particular term structure. Again, the implied volatility interface operates on batches of options, with each index of the arrays corresponding to an independent problem to solve.
###Code
# Initial positions for finding implied vol.
initial_volatilities = np.array([2.0, 0.5, 2.0, 0.5])
# Option signs: 1.0 for call, -1.0 for put.
option_signs = np.array([1.0, 1.0, -1.0, -1.0])
# Find the implied vols beginning at initial_volatilities.
implied_vols, converged, failed = implied_vol(
forwards,
strikes,
expiries,
discount_factors,
prices,
option_signs,
initial_volatilities=initial_volatilities,
validate_args=True,
tolerance=1e-9,
max_iterations=200,
name=None,
dtype=None)
implied_vols
###Output
_____no_output_____
###Markdown
Which should show that `implied_vols` is very close to the `volatilities` used to generate the market prices. Here we provided initial starting positions, however, by default `tff` will chose an adaptive initialisation position as discussed below. Black Scholes implied volatility convergence regionWe now look at some charts which provide a basic illustration of the convergence region of the implemented root finding method.The library provides an implied volatility root finding method. If not providedwith an initial starting point, a starting point will be found using the Radiocic-Polya approximation [1] to the implied volatility. This section illustrates both call styles and the comparitive advantage of using targeted initialisation.In this example:* Forward prices are fixed at 1.* Strike prices are from uniform grid on (0, 5).* Expiries are fixed at 1.* Volatilities are from a uniform grid on (0, 5).* Fixed initial volatilities (where used) are 1.* Option prices were computed by tff.black_scholes.option_price on the other data.* Discount factors are 1.[1] Dan Stefanica and Rados Radoicic. [*An explicit implied volatility formula.*](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2908494) International Journal of Theoretical and Applied Finance. Vol. 20, no. 7, 2017.
###Code
#@title Example data on a grid.
def grid_data(strike_vec, vol_vec, dtype=np.float64):
"""Construct dummy data with known ground truth.
For a grid of known strikes by volatilities, return the price.
Assumes the forward prices and expiries are fixed at unity.
Args:
strikes: a vector of strike prices from which to form the grid.
volatilities: a vector of volatilities from which to form the grid.
dtype: a numpy datatype for the element values of returned arrays.
Returns:
(forwards, strikes, expiries, true_volatilities, prices) all of
which are identically shaped numpy arrays.
"""
nstrikes = len(strike_vec)
nvolatilities = len(vol_vec)
vol_ones = np.matrix(np.ones((1, nvolatilities)))
strike_ones = np.matrix(np.ones((nstrikes, 1)))
strikes = np.array(np.matrix(strike_vec).T * vol_ones, dtype=dtype)
volatilities = np.array(strike_ones * np.matrix(vol_vec), dtype=dtype)
expiries = np.ones_like(strikes, dtype=dtype)
forwards = np.ones_like(strikes, dtype=dtype)
initials = np.ones_like(strikes, dtype=dtype)
prices = tff.volatility.black_scholes.option_price(forwards,
strikes,
volatilities,
expiries,
dtype=tf.float64)
return (forwards, strikes, expiries, volatilities, initials, prices)
# Build a 1000 x 1000 grid of options find the implied volatilities of.
nstrikes = 1000
nvolatilities = 1000
strike_vec = np.linspace(0.0001, 5.0, nstrikes)
vol_vec = np.linspace(0.0001, 5.0, nvolatilities)
max_iterations = 50
grid = grid_data(strike_vec, vol_vec)
forwards0, strikes0, expiries0, volatilities0, initials0, prices0 = grid
initials0 = discounts0 = signs0 = np.ones_like(prices0)
# Implied volitilities, starting the root finder at 1.
implied_vols_fix, converged_fix, failed_fix = implied_vol(
forwards0,
strikes0,
expiries0,
discounts0,
prices0,
signs0,
initial_volatilities=initials0,
validate_args=False,
tolerance=1e-8,
max_iterations=max_iterations)
# Implied vols starting the root finder at the Radiocic-Polya approximation.
implied_vols_polya, converged_polya, failed_polya = implied_vol(
forwards0,
strikes0,
expiries0,
discounts0,
prices0,
signs0,
validate_args=False,
tolerance=1e-8,
max_iterations=max_iterations)
#@title Visualisation of accuracy
plt.clf()
thinner = 100
fig, _axs = plt.subplots(nrows=1, ncols=2)
fig.subplots_adjust(hspace=0.3)
axs = _axs.flatten()
implied_vols = [implied_vols_fix, implied_vols_polya]
titles = ["Fixed initialisation implied vol minus true vol", "Radiocic-Polya initialised implied vol minus true vol"]
vmin = np.min(list(map(np.min, implied_vols)))
vmax = np.max(list(map(np.max, implied_vols)))
images = []
for i in range(2):
_title = axs[i].set_title(titles[i])
_title.set_position([.5, 1.03])
im = axs[i].imshow(implied_vols[i] - volatilities0, origin="lower", interpolation="none", cmap="seismic", vmin=-1.0, vmax=1.0)
images.append(im)
axs[i].set_xticks(np.arange(0, len(vol_vec), thinner))
axs[i].set_yticks(np.arange(0, len(strike_vec), thinner))
axs[i].set_xticklabels(np.round(vol_vec[0:len(vol_vec):thinner], 3))
axs[i].set_yticklabels(np.round(strike_vec[0:len(strike_vec):thinner], 3))
plt.colorbar(im, ax=axs[i], fraction=0.046, pad=0.00)
axs[i].set_ylabel('Strike')
axs[i].set_xlabel('True vol')
plt.show()
pass
###Output
_____no_output_____
###Markdown
Where the grey values represent `nan`s in the grid. Note that the bottom left corner of each image lies outside the bounds where inversion should be possible. The pattern of `nan` values for different values of a fixed initialisation strategy will be different (rerun the colab to see). Black Scholes implied volatility initialisation strategy accuracy comparisonWe can also consider the median absolute error for fixed versus Radiocic-Polya initialisation of the root finder. We consider a clipped grid looking at performance away from the boundaries where extreme values or nans might occur.
###Code
# Indices for selecting the middle of the grid.
vol_slice = np.arange(int(0.25*len(vol_vec)), int(0.75*len(vol_vec)))
strike_slice = np.arange(int(0.25*len(strike_vec)), int(0.75*len(strike_vec)))
error_fix = implied_vols_fix.numpy() - volatilities0
error_fix_sub = [error_fix[i, j] for i, j in zip(strike_slice, vol_slice)]
# Calculate the median absolute error in the central portion of the the grid
# for the fixed initialisation.
median_error_fix = np.median( np.abs(error_fix_sub) )
median_error_fix
error_polya = implied_vols_polya.numpy() - volatilities0
error_polya_sub = [error_polya[i, j] for i, j in zip(strike_slice, vol_slice)]
# Calculate the median absolute error in the central portion of the the grid
# for the Radiocic-Polya approximation.
median_error_polya = np.median( np.abs(error_polya_sub) )
median_error_polya
median_error_fix / median_error_polya
###Output
_____no_output_____ |
pythonpizza.ipynb | ###Markdown
Python Pizza - New Year's Party PyRT 2021 - Computer Graphics with Pythonhttps://hamburg.python.pizza/This notebook can be downloaded here: https://github.com/martinchristen/PythonPizzaNYE2020 About MeMartin Christen, [email protected] of Geoinformatics and Computer GraphicsTwitter @MartinChristen What is PyRT ?https://github.com/martinchristen/pyRT* PyRT - The Python Raytracer.* I originally developed pyRT for **teaching computer graphics**.* One part of pyrt is the **virtual framebuffer** where you can draw Pixels using standard algorithms such as Bresenham's line drawing algorithm [1]* From Version 0.5.0 an additional goal is better Jupyter integration, this is now done in RGBImage.* **server side rendering** without depending on a graphics card.[1] J. E. Bresenham, 1965. Algorithm for computer control of a digital plotter. In: IBM Systems Journal, 4, 1, 25–30, ISSN 0018-8670 Requirements* This notebook requires the following modules: pyrt, pillow, numpy, and moviepy* This notebook currently only runs on **Chrome Browser**. Yes, other browsers will be supported in 2021.`pip install pyrt` `pip install moviepy``conda install pillow numpy` Virtual Framebuffer for Pixel Operations
###Code
from pyrt.renderer import RGBImage
from pyrt.math import Vec2, Vec3
import random
###Output
_____no_output_____
###Markdown
Animated Virtual Framebuffer in Jupyter
###Code
w = 320
h = 240
image = RGBImage(w, h)
image.clear(Vec3(0.0,0.0,0.4))
for i in range(5000):
position = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image.drawPoint(position, color, 1)
image.framebuffer()
for i in range(100):
pos1 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
pos2 = Vec2(random.randint(0, w - 1), random.randint(0, h - 1))
color = Vec3(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
image.drawLine(pos1, pos2, color, 2)
image.update(fps=30)
type(image.data)
print(image.data) # and yes, we could manipulate data directly here (or display with matplotlib)
###Output
_____no_output_____
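The `drawLine` calls above rely on Bresenham's classic line-drawing algorithm [1]. For reference, here is a standalone sketch of the integer version of that algorithm (an illustration only, not pyrt's actual implementation):
```
def bresenham(x0, y0, x1, y1):
    """Return the pixel coordinates on the line from (x0, y0) to (x1, y1)."""
    points = []
    dx, dy = abs(x1 - x0), -abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx + dy
    while True:
        points.append((x0, y0))
        if x0 == x1 and y0 == y1:
            break
        e2 = 2 * err
        if e2 >= dy:   # step in x
            err += dy
            x0 += sx
        if e2 <= dx:   # step in y
            err += dx
            y0 += sy
    return points

# e.g. bresenham(0, 0, 6, 3)
# -> [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2), (5, 3), (6, 3)]
```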
###Markdown
Create a MovieFor more details check the moviepy docs: https://zulko.github.io/moviepy/ref/VideoClip/VideoClip.html
###Code
import moviepy.editor as mpy
from IPython.display import Video
from IPython.display import Image
w = 320
h = 240
image = RGBImage(w, h)
def make_frame(t):
image.drawCircleFilled(Vec2(127,127), int(t*8), Vec3(0,0,0), Vec3(1,0,0), 1)
return image.data
image.clear(Vec3(0.0,0.0,0.4))
clip = mpy.VideoClip(make_frame, duration=5)
clip.write_videofile("movie.mp4", fps=30)
Video("movie.mp4")
image.clear(Vec3(0.0,0.0,0.4))
clip.write_gif("movie.gif",fps=30, opt="OptimizePlus", fuzz=10)
Image("movie.gif")
###Output
_____no_output_____
###Markdown
Loading Images
###Code
from pyrt.renderer import loadimage
image2 = loadimage("data/worldmap/world600.jpg")
image2.framebuffer("world")
image2.drawCircleFilled(Vec2(300,150), radius=10, color=Vec3(1,0,0), fillcolor=Vec3(1,1,0), size=1)
image2.update("world")
image2.drawCircleFilled(Vec2(100,50), radius=30, color=Vec3(0,0,0), fillcolor=Vec3(0,1,0), size=3)
image2.update("world")
###Output
_____no_output_____
###Markdown
Animated Stars
###Code
from pyrt.renderer import RGBImage
from pyrt.math import Vec2, Vec3
import random
w = 600
h = 400
image = RGBImage(w, h)
image.clear(Vec3(0.0,0.0,0.0))
image.framebuffer('stars')
###Output
_____no_output_____
###Markdown
**Create new View for this output**: right click on blue bar and select "Create New View for Output" and move it to top right (Jupyter Lab only)
###Code
class Star:
def __init__(self, pos,size,color):
self.pos = pos # Vec2
self.size = size # int
self.color = color # Vec3
# Create a list of 2000 stars:
stars = []
for i in range(0,2000):
position = Vec2(random.randint(0, w-1), random.randint(0, h-1))
s = random.randint(1,4)
color = Vec3(s/4,s/4,s/4)
newstar = Star(position,s,color)
stars.append(newstar)
def render_stars(stars):
for star in stars:
image.drawPoint(star.pos, star.color, star.size)
image.update('stars', fps=30)
render_stars(stars)
def update_stars(stars):
for star in stars:
# increase position
star.pos.x = star.pos.x + star.size*2
if star.pos.x>=w:
star.pos.x=0
image.clear(Vec3(0.0,0.0,0.0))
render_stars(stars)
update_stars(stars)
for i in range(0,250):
update_stars(stars)
###Output
_____no_output_____
###Markdown
Fireworks - Particle ExplosionWe're creating two classes:* **Particle**: a single particle with a position, velocity, speed, size, lifetime* **Explosion**: a collection of particles with a velocity along a circle and a random speed, size, color
###Code
import math, random
image.clear(Vec3(0.0,0.0,0.0))
image.update('stars')
class Particle:
def __init__(self, pos, velocity, color, size, frames=-1):
self.pos = pos # position, Vec2
self.v = velocity # velocity of particle, Vec2
self.color = color # color of particle, Vec3
self.size = size # size of particle, int
self.frames = frames # number of frames before this particle is removed, int
class Explosion:
def __init__(self, pos, num_particles):
self.particles = []
self.maxframes = 100
for i in range(0,num_particles):
angle = 2*(0.5-random.random())*math.pi
color = Vec3(0.8+random.random()*0.2,0.3*random.random(),0.3*random.random())
s = random.randint(1,4)
size = random.randint(1,3)
speed = 3 # 3*random.random() # if constant -> circle
p = Particle(pos, Vec2(math.cos(angle)*speed,
math.sin(angle)*speed),
color, size, self.maxframes)
self.particles.append(p)
def update(self):
for p in self.particles:
p.pos = Vec2(p.pos.x + p.v.x, p.pos.y + p.v.y)
p.frames -= 1
if p.frames < 0:
p.frames = 0
def draw(self):
for p in self.particles:
fade = p.frames / self.maxframes
image.drawPoint(Vec2(int(p.pos.x),
int(p.pos.y)),
fade * p.color,
p.size)
e = Explosion(Vec2(w//2,h//2), 500)
image.clear(Vec3(0.0,0.0,0.0))
for i in range(0,100):
image.clear(Vec3(0.0,0.0,0.0))
e.update()
e.draw()
image.update('stars')
###Output
_____no_output_____
###Markdown
Multiple particle explosions
###Code
e0 = Explosion(Vec2(w//2,h//2), 500)
for i in range(0,100):
image.clear(Vec3(0.0,0.0,0.0))
e0.update()
e0.draw()
image.update('stars')
e1 = Explosion(Vec2(w//4,h//3), 800)
for i in range(0,100):
image.clear(Vec3(0.0,0.0,0.0))
e1.update()
e1.draw()
image.update('stars')
e2 = Explosion(Vec2(w//2+30,h//3), 800)
e3 = Explosion(Vec2(w//4,h//4), 800)
for i in range(0,100):
image.clear(Vec3(0.0,0.0,0.0))
e2.update()
e3.update()
e2.draw()
e3.draw()
image.update('stars', fps=30)
###Output
_____no_output_____
###Markdown
And now let's create a Movie
###Code
e0 = Explosion(Vec2(w//2,h//2), 500)
e1 = Explosion(Vec2(w//4,h//3), 800)
e2 = Explosion(Vec2(w//2+30,h//3), 800)
e3 = Explosion(Vec2(w//4,h//4), 800)
# 30 fps x 10 seconds = 300 frames, so let's make 10 seconds
duration = 10
fps = 30
def make_frame(s):
# s runs from 0...10 (if clip is 10 seconds)
# calculate frame number:
f = int(s*fps)
if s < 3:
image.clear(Vec3(0.0,0.0,0.0))
e0.update()
e0.draw()
elif s < 6:
image.clear(Vec3(0.0,0.0,0.0))
e1.update()
e1.draw()
else:
image.clear(Vec3(0.0,0.0,0.0))
e2.update()
e3.update()
e2.draw()
e3.draw()
return image.data
clip = mpy.VideoClip(make_frame, duration=duration)
image.clear(Vec3(0.0,0.0,0.0))
clip.write_gif("firework.gif",fps=fps, opt="OptimizePlus", fuzz=10)
Image("firework.gif")
###Output
_____no_output_____ |
Udemy/Machhine Learning Bootcamp/ml-practice-spam-classifier/BayesClassifier-testing.ipynb | ###Markdown
Imports
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Constants
###Code
VOCAB_SIZE = 2500
TOKEN_SPAM_PROB_FILE = 'SpamData/03_Testing/prob-spam.txt'
TOKEN_HAM_PROB_FILE = 'SpamData/03_Testing/prob-nonspam.txt'
TOKEN_ALL_PROB_FILE = 'SpamData/03_Testing/prob-all-tokens.txt'
TEST_FEATURE_MATRIX = 'SpamData/03_Testing/test-features.txt'
TEST_TRAGET_FILE = 'SpamData/03_Testing/test-target.txt'
###Output
_____no_output_____
###Markdown
Load the data
###Code
X_test = np.loadtxt(TEST_FEATURE_MATRIX,delimiter=' ')
y_test = np.loadtxt(TEST_TRAGET_FILE, delimiter=' ')
prob_token_spam = np.loadtxt(TOKEN_SPAM_PROB_FILE, delimiter=' ')
prob_token_ham = np.loadtxt(TOKEN_HAM_PROB_FILE, delimiter=' ')
prob_all_tokkens = np.loadtxt(TOKEN_ALL_PROB_FILE, delimiter=' ')
###Output
_____no_output_____
###Markdown
Calculating Joint Probability
###Code
print(f'The dimensions of the dot product between X_test and prob_token_spam are {X_test.dot(prob_token_spam).shape}')
###Output
The dimensions of the dot product between X_test and prob_token_spam are (1724,)
###Markdown
Set the prior $P(Spam|X) = \frac{P(X|Spam)P(Spam)}{P(X)}$
###Code
PROB_SPAM = 0.3109
###Output
_____no_output_____
###Markdown
```Taking logs turns the multiplications and divisions into additions and subtractions, and since the raw probabilities are tiny and very close together, working in log space spreads the values out (which also makes the plots much neater).```
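In log space, the score computed in the next cell can be written as (a restatement of the formula above, with $x_i$ the count of token $i$ in an email):
$$\log P(Spam \mid X) \;\propto\; \sum_i x_i \,\big[\log P(token_i \mid Spam) - \log P(token_i)\big] + \log P(Spam)$$
and analogously for the ham class; only the relative size of the two scores matters for classification.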
###Code
np.log(prob_token_spam)
###Output
_____no_output_____
###Markdown
Joint probability in log format
###Code
joint_log_spam = X_test.dot(np.log(prob_token_spam)-np.log(prob_all_tokkens)) + np.log(PROB_SPAM)
joint_log_spam[:5]
joint_log_ham = X_test.dot(np.log(prob_token_ham)-np.log(prob_all_tokkens)) + np.log(1-PROB_SPAM)
joint_log_ham[:5]
joint_log_ham.size
###Output
_____no_output_____
###Markdown
Making Predictions Checking for higher probabilities$P(Spam|X)>P(Ham|X)$$P(Spam|X)<P(Ham|X)$
###Code
predictions = (joint_log_spam>joint_log_ham)*1
predictions[-5:]
y_test[-5:]
###Output
_____no_output_____
###Markdown
Metrics and Evaluation Accuracy
###Code
correct_docs = (y_test == predictions).sum()
print(f'Docs classified correctly {correct_docs}')
num_docs_wrong = X_test.shape[0] - correct_docs
print(f'Number of documents classified incorrectly {num_docs_wrong}')
accuracy = (correct_docs/X_test.shape[0])*100
print(f'Accuracy is {accuracy:.3f}%')
print(f'Fraction of emails misclassified {100-accuracy:.3f}%')
###Output
Fraction of emails misclassified 2.262%
###Markdown
Demerits of accuracy 1. If the data contain mostly positive outcomes and we build a model that classifies every outcome as positive, we have a bad model that can lead to serious problems, yet its accuracy will still be high, which is misleading.
###Code
np.unique(predictions,return_counts=True)
true_pos = (y_test == 1)&(predictions == 1) #the email is spam and it is classified as spam
true_pos.sum()
false_pos = (y_test == 0)&(predictions == 1) #the email is not spam but it is classified as spam
false_pos.sum()
false_neg = (y_test == 1)&(predictions==0) #the email is spam but it is classified as not spam
false_neg.sum()
true_neg = (y_test==0)&(predictions==0) #the email is not spam but it is classified as not spam
true_neg.sum()
confusion_matrix = np.array([[true_pos.sum(), false_pos.sum()],[false_neg.sum(), true_neg.sum()]])
confusion_matrix
###Output
_____no_output_____
###Markdown
$Recall = \frac{TP}{TP+FN}$ We can think of this as: out of all the spam emails, how many did we classify correctly? A weakness of the recall score is that it can be easily maximized/manipulated by labelling all emails as spam.
###Code
recall = true_pos.sum()/(true_pos.sum()+false_neg.sum())
print(f'The recall score is {recall:.3f}')
###Output
The recall score is 0.966
###Markdown
$Precision = \frac{TP}{TP+FP}$Ratio of correctly classified spam messages to the total number of times we predicted spam
###Code
precision = true_pos.sum()/(true_pos.sum()+false_pos.sum())
print(f'The precision is {precision:.3f}')
###Output
The precision is 0.968
###Markdown
F Score = $2 \times \frac{precision \times recall}{precision + recall}$The F score is the harmonic mean of precision and recall, so it takes both the false positives and the false negatives into account, and since it always lies between 0 and 1 it provides a universal scale for comparison.
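The cell below computes the F1 score by hand; as an optional cross-check (not part of the original notebook), scikit-learn's built-in metrics should give the same precision, recall and F1 values:
```
from sklearn.metrics import precision_score, recall_score, f1_score

print('Precision:', precision_score(y_test, predictions))
print('Recall:   ', recall_score(y_test, predictions))
print('F1 score: ', f1_score(y_test, predictions))
```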
###Code
f1_score = 2*((precision*recall)/(precision+recall))
print(f'The F1 score is {f1_score:.3f}')
###Output
The F1 score is 0.967
|
data-databases-homework/Homework_5_Gruen.ipynb | ###Markdown
Homework 5This homework presents a sophisticated scenario in which you must design a SQL schema, insert data into it, and issue queries against it. The scenarioIn the year 20XX, I have won the lottery and decided to leave my programming days behind me in order to pursue my true calling as a [cat cafe](https://en.wikipedia.org/wiki/Cat_caf%C3%A9) tycoon. [This webpage](http://static.decontextualize.com/cats.html) lists the locations of my cat cafes and all the cats that are currently in residence at these cafes.I'm interested in doing more detailed analysis of my cat cafe holdings and the cats that are currently being cared for by my cafes. For this reason, I've hired *you* to convert this HTML page into a workable SQL database. (Why don't I just do it myself? Because I am far too busy hanging out with adorable cats in all of my beautiful, beautiful cat cafes.)Specifically, I want to know the answers to the following questions:* What's the name of the youngest cat at any location?* In which zip codes can I find a lilac-colored tabby?* What's the average weight of cats currently residing at any location (grouped by location)?* Which location has the most cats with tortoiseshell coats?Because I'm not paying you very much, and because I am a merciful person who has considerable experience in these matters, I've decided to *write the queries for you*. (See below.) Your job is just to scrape the data from the web page, create the appropriate tables in PostgreSQL, and insert the data into those tables.Before you continue, scroll down to "The Queries" below to examine the queries as I wrote them. Problem set 1: Scraping the dataYour first goal is to create two data structures, both lists of dictionaries: one for the list of locations and one for the list of cats. You'll get these from scraping two `` tags in the HTML: the first table has a class of `cafe-list`, the second has a class of `cat-list`.Before you do anything else, though, execute the following cell to import Beautiful Soup and create a BeautifulSoup object with the content of the web page:
###Code
from bs4 import BeautifulSoup
from urllib.request import urlopen
html = urlopen("http://static.decontextualize.com/cats.html").read()
document = BeautifulSoup(html, "html.parser")
###Output
_____no_output_____
###Markdown
Let's tackle the list of cafes first. In the cell below, write some code that creates a list of dictionaries with information about each cafe, assigning it to the variable `cafe_list`. I've written some of the code for you; you just need to fill in the rest. The list should end up looking like this:```[{'name': 'Hang In There', 'zip': '11237'}, {'name': 'Independent Claws', 'zip': '11201'}, {'name': 'Paws and Play', 'zip': '11215'}, {'name': 'Tall Tails', 'zip': '11222'}, {'name': 'Cats Meow', 'zip': '11231'}]```
###Code
cafe_list = list()
cafe_table = document.find('table', {'class': 'cafe-list'})
tbody = cafe_table.find('tbody')
for tr_tag in tbody.find_all('tr'):
    cafe_name = str(tr_tag.find('td', {'class': 'name'}).string)
    cafe_zip = str(tr_tag.find('td', {'class': 'zip'}).string)
    cafe_dict = {'name': cafe_name, 'zip': cafe_zip}
    cafe_list.append(cafe_dict)
cafe_list
###Output
_____no_output_____
###Markdown
Great! In the following cell, write some code that creates a list of cats from the `` tag on the page, storing them as a list of dictionaries in a variable called `cat_list`. Again, I've written a bit of the code for you. Expected output:```[{'birthdate': '2015-05-20', 'color': 'black', 'locations': ['Paws and Play', 'Independent Claws*'], 'name': 'Sylvester', 'pattern': 'colorpoint', 'weight': 10.46}, {'birthdate': '2000-01-03', 'color': 'cinnamon', 'locations': ['Independent Claws*'], 'name': 'Jasper', 'pattern': 'solid', 'weight': 8.06}, {'birthdate': '2006-02-27', 'color': 'brown', 'locations': ['Independent Claws*'], 'name': 'Luna', 'pattern': 'tortoiseshell', 'weight': 10.88},[...many records omitted for brevity...] {'birthdate': '1999-01-09', 'color': 'white', 'locations': ['Cats Meow*', 'Independent Claws', 'Tall Tails'], 'name': 'Lafayette', 'pattern': 'tortoiseshell', 'weight': 9.3}]```Note: Observe the data types of the values in each dictionary! Make sure to explicitly convert values retrieved from `.string` attributes of Beautiful Soup tag objects to `str`s using the `str()` function.
###Code
cat_list = list()
cat_table = document.find('table', {'class': 'cat-list'})
tbody = cat_table.find('tbody')
for tr_tag in tbody.find_all('tr'):
cat_dict = {}
cat_dict['birthdate'] = tr_tag.find('td', {'class': 'birthdate'}).string
cat_dict['color'] = tr_tag.find('td', {'class': 'color'}).string
cat_dict['locations'] = tr_tag.find('td', {'class': 'locations'}).string.split(",")
cat_dict['name'] = tr_tag.find('td', {'class': 'name'}).string
cat_dict['pattern'] = tr_tag.find('td', {'class': 'pattern'}).string
cat_dict['weight'] = float(tr_tag.find('td', {'class': 'weight'}).string)
cat_list.append(cat_dict)
cat_list
###Output
_____no_output_____
###Markdown
Problem set 2: Designing the schemaBefore you do anything else, use `psql` to create a new database for this homework assignment using the following command: CREATE DATABASE catcafes; In the following cell, connect to the database using `pg8000`. (You may need to provide additional arguments to the `.connect()` method, depending on the distribution of PostgreSQL you're using.)
###Code
import pg8000
conn = pg8000.connect(user='postgres', password='12345', database="catcafes")
###Output
_____no_output_____
###Markdown
Here's a cell you can run if something goes wrong and you need to rollback the current query session:
###Code
conn.rollback()
###Output
_____no_output_____
###Markdown
In the cell below, you're going to create *three* tables, necessary to represent the data you scraped above. I've given the basic framework of the Python code and SQL statements to create these tables. I've given the entire `CREATE TABLE` statement for the `cafe` table, but for the other two, you'll need to supply the field names and the data types for each column. If you're unsure what to call the fields, or what fields should be in the tables, consult the queries in "The Queries" below. Hints:* Many of these fields will be `varchar`s. Don't worry too much about how many characters you need—it's okay just to eyeball it.* Feel free to use a `varchar` type to store the `birthdate` field. No need to dig too deep into PostgreSQL's date types for this particular homework assignment.* Cats and locations are in a *many-to-many* relationship. You'll need to create a linking table to represent this relationship. (That's why there's space for you to create *three* tables.)* The linking table will need a field to keep track of whether or not a particular cafe is the "current" cafe for a given cat.
###Code
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE cafe (
id serial,
name varchar(40),
zip varchar(5)
)
""")
cursor.execute("""
CREATE TABLE cat (
id serial,
name varchar(50),
birthdate varchar(50),
weight float,
color varchar(50),
pattern varchar(50)
)
""")
cursor.execute("""
CREATE TABLE cat_cafe (
cat_id int,
cafe_id int,
active bool
)
""")
conn.commit()
###Output
_____no_output_____
###Markdown
After executing the above cell, issuing a `\d` command in `psql` should yield something that looks like the following:``` List of relations Schema | Name | Type | Owner --------+-------------+----------+--------- public | cafe | table | allison public | cafe_id_seq | sequence | allison public | cat | table | allison public | cat_cafe | table | allison public | cat_id_seq | sequence | allison(5 rows)```If something doesn't look right, you can always use the `DROP TABLE` command to drop the tables and start again. (You can also issue a `DROP DATABASE catcafes` command to drop the database altogether.) Don't worry if it takes a few tries to get it right—happens to the best and most expert among us. You'll probably have to drop the database and start again from scratch several times while completing this homework.> Note: If you try to issue a `DROP TABLE` or `DROP DATABASE` command and `psql` seems to hang forever, it could be that PostgreSQL is waiting for current connections to close before proceeding with your command. To fix this, create a cell with the code `conn.close()` in your notebook and execute it. After the `DROP` commands have completed, make sure to run the cell containing the `pg8000.connect()` call again. Problem set 3: Inserting the dataIn the cell below, I've written the code to insert the cafes into the `cafe` table, using data from the `cafe_list` variable that we made earlier. If the code you wrote to create that table was correct, the following cell should execute without error or incident. Execute it before you continue.
###Code
cafe_name_id_map = {}
for item in cafe_list:
cursor.execute("INSERT INTO cafe (name, zip) VALUES (%s, %s) RETURNING id",
[str(item['name']), str(item['zip'])])
rowid = cursor.fetchone()[0]
cafe_name_id_map[str(item['name'])] = rowid
conn.commit()
###Output
_____no_output_____
###Markdown
Issuing `SELECT * FROM cafe` in the `psql` client should yield something that looks like this:``` id | name | zip ----+-------------------+------- 1 | Hang In There | 11237 2 | Independent Claws | 11201 3 | Paws and Play | 11215 4 | Tall Tails | 11222 5 | Cats Meow | 11231(5 rows)```(The `id` values may be different depending on how many times you've cleaned the table out with `DELETE`.)Note that the code in the cell above created a dictionary called `cafe_name_id_map`. What's in it? Let's see:
###Code
cafe_name_id_map
###Output
_____no_output_____
###Markdown
The dictionary maps the *name of the cat cafe to its ID in the database*. You'll need these values later when you're adding records to the linking table (`cat_cafe`).Now the tricky part. (Yes, believe it or not, *this* is the tricky part. The other stuff has all been easy by comparison.) In the cell below, write the Python code to insert each cat's data from the `cat_list` variable (created in Problem Set 1) into the `cat` table. The code should *also* insert the relevant data into the `cat_cafe` table. Hints:* You'll need to get the `id` of each cat record using the `RETURNING` clause of the `INSERT` statement and the `.fetchone()` method of the cursor object.* How do you know whether or not the current location is the "active" location for a particular cat? The page itself contains some explanatory text that might be helpful here. You might need to use some string checking and manipulation functions in order to make this determination and transform the string as needed.* The linking table stores an ID only for both the cat and the cafe. Use the `cafe_name_id_map` dictionary to get the `id` of the cafes inserted earlier.
###Code
conn.rollback()
cat_insert = "Insert into cat (name, birthdate, weight, color, pattern) values (%s, %s, %s, %s, %s) returning id"
cat_cafe_insert = "Insert into cat_cafe (cat_id, cafe_id, active) values (%s, %s, %s)"
for cat in cat_list:
cursor.execute(cat_insert, [str(cat['name']), str(cat['birthdate']), float(cat['weight']), str(cat['color']), str(cat['pattern'])])
catrowid = cursor.fetchone()[0]
for cafe in cat['locations']:
for place in cafe_name_id_map:
if place in cafe:
if '*' in cafe:
cursor.execute(cat_cafe_insert, [catrowid, cafe_name_id_map[place], True])
else:
cursor.execute(cat_cafe_insert, [catrowid, cafe_name_id_map[place], False])
conn.commit()
###Output
_____no_output_____
###Markdown
Issuing a `SELECT * FROM cat LIMIT 10` in `psql` should yield something that looks like this:``` id | name | birthdate | weight | color | pattern ----+-----------+------------+--------+----------+--------------- 1 | Sylvester | 2015-05-20 | 10.46 | black | colorpoint 2 | Jasper | 2000-01-03 | 8.06 | cinnamon | solid 3 | Luna | 2006-02-27 | 10.88 | brown | tortoiseshell 4 | Georges | 2015-08-13 | 9.40 | white | tabby 5 | Millie | 2003-09-13 | 9.27 | red | bicolor 6 | Lisa | 2009-07-30 | 8.84 | cream | colorpoint 7 | Oscar | 2011-12-15 | 8.44 | cream | solid 8 | Scaredy | 2015-12-30 | 8.83 | lilac | tabby 9 | Charlotte | 2013-10-16 | 9.54 | blue | tabby 10 | Whiskers | 2011-02-07 | 9.47 | white | colorpoint(10 rows)```And a `SELECT * FROM cat_cafe LIMIT 10` in `psql` should look like this:``` cat_id | cafe_id | active --------+---------+-------- 1 | 3 | f 1 | 2 | t 2 | 2 | t 3 | 2 | t 4 | 4 | t 4 | 1 | f 5 | 3 | t 6 | 1 | t 7 | 1 | t 7 | 5 | f(10 rows)```Again, the exact values for the ID columns might be different, depending on how many times you've deleted and dropped the tables. The QueriesOkay. To verify your work, run the following queries and check their output. If you've correctly scraped the data and imported it into SQL, running the cells should produce exactly the expected output, as indicated. If not, then you performed one of the steps above incorrectly; check your work and try again. (Note: Don't modify these cells, just run them! This homework was about *scraping* and *inserting* data, not querying it.) What's the name of the youngest cat at any location?Expected output: `Scaredy`
###Code
cursor.execute("SELECT max(birthdate) FROM cat")
birthdate = cursor.fetchone()[0]
cursor.execute("SELECT name FROM cat WHERE birthdate = %s", [birthdate])
print(cursor.fetchone()[0])
###Output
Scaredy
###Markdown
In which zip codes can I find a lilac-colored tabby?Expected output: 11237, 11215
###Code
cursor.execute("""SELECT DISTINCT(cafe.zip)
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat.color = 'lilac' AND cat.pattern = 'tabby' AND cat_cafe.active = true
""")
print(', '.join([x[0] for x in cursor.fetchall()]))
###Output
11237, 11215
###Markdown
What's the average weight of cats currently residing at all locations?Expected output:```Independent Claws: 9.33Paws and Play: 9.28Tall Tails: 9.82Hang In There: 9.25Cats Meow: 9.76```
###Code
cursor.execute("""
SELECT cafe.name, avg(cat.weight)
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat_cafe.active = true
GROUP BY cafe.name
""")
for rec in cursor.fetchall():
print(rec[0]+":", "%0.2f" % rec[1])
###Output
Hang In There: 9.25
Independent Claws: 9.33
Paws and Play: 9.28
Tall Tails: 9.82
Cats Meow: 9.75
###Markdown
Which location has the most cats with tortoiseshell coats?Expected output: `Independent Claws`
###Code
cursor.execute("""
SELECT cafe.name
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat_cafe.active = true AND cat.pattern = 'tortoiseshell'
GROUP BY cafe.name
ORDER BY count(cat.name) DESC
LIMIT 1
""")
print(cursor.fetchone()[0])
###Output
Independent Claws
|
9.10.ipynb | ###Markdown
Basic program design- For all code input, please use an English input method
###Code
radius = 10
area = radius * radius * 3.1415
print(area)
print('hello')
###Output
hello
###Markdown
Writing a simple program- Circle area formula: area = radius \* radius \* 3.1415 In Python there is no need to declare data types Reading input from the console- input reads whatever is typed as a string- eval evaluates the input as an expression
###Code
x=int(input('请输入正方形的边长'))
area = x*x
print(area)
x=eval(input('请输入正方形的边长'))
area = x*x
print(area)
###Output
请输入正方形的边长10.0
100.0
###Markdown
- In Jupyter, pressing shift + tab pops up the documentation Rules for naming variables- made up of letters, digits and underscores- cannot start with a digit \*- an identifier cannot be a keyword (this can actually be forced, but it is extremely poor practice for code style)- can be of any length- camelCase naming Variables, assignment statements and assignment expressions- variable: informally, a quantity that can change- x = 2 \* x + 1 is an equation in mathematics, but in a programming language it is an expression- test = test + 1 \* a variable must already have a value before it appears on the right of an assignment Simultaneous assignment var1, var2, var3... = exp1, exp2, exp3... Defining constants- constant: an identifier for a fixed value, suited to values that are used many times, e.g. PI- note: in other, lower-level languages a constant cannot be changed once defined, but in Python everything is an object, so a constant can still be changed Numeric data types and operators- Python has two numeric types (int and float) that support addition, subtraction, multiplication, division, modulo and exponentiation The operators /, //, ** The operator %
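A few quick illustrations of the points above (extra examples; the exercises start in the next cell):
```
a, b = 3, 4          # simultaneous assignment
print(a, b)          # -> 3 4
print(7 / 2)         # true division    -> 3.5
print(7 // 2)        # floor division   -> 3
print(7 % 2)         # modulo           -> 1
print(2 ** 10)       # exponentiation   -> 1024
```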
###Code
week = eval(input('今天是星期'))
qu = (week+10) % 7
print("在十天后是星期"+str(qu))
num=eval(input(">>"))
min = num//60
seconds = num%60
print(str(min)+'分'+str(seconds)+'秒' )
num=eval(input("请输入一个数字"))
if num%2==0:
print('这个数是偶数')
else:
print('这个数是奇数')
part1=(3+4*10)/5
part2=(10*(6-5)*(0+1+1))/10
part3=9*(4/10+(9+10)/6)
sum=part1-part2+part3
print(sum)
###Output
38.699999999999996
###Markdown
EP:- What is 25/4, and how would you rewrite it to get an integer result- Read a number and determine whether it is odd or even- Advanced: read a number of seconds and write a program to convert it into minutes and seconds, e.g. 500 seconds equals 8 minutes 20 seconds- Advanced: if today is Saturday, what day of the week will it be 10 days from now? Hint: day 0 of each week is Sunday
###Code
25/4
25//4
25%4
###Output
_____no_output_____
###Markdown
Scientific notation- 1.234e+2- 1.234e-2 Evaluating expressions and operator precedence Augmented assignment operators Type conversion- float -> int- rounding with round EP:- If the annual business tax rate is 0.06%, how much tax is due on an annual income of 197.55e+2? (keep 2 decimal places in the result)- Scientific notation must be used
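Small examples of these ideas (extra illustrations; the tax exercise itself is solved in the next cell):
```
print(1.234e+2, 1.234e-2)   # scientific notation -> 123.4 0.01234
x = 1
x += 1                      # augmented assignment, same as x = x + 1
print(x)                    # -> 2
print(int(3.9))             # float -> int truncates toward zero -> 3
print(round(3.14159, 2))    # keep 2 decimal places -> 3.14
```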
###Code
round(197.55e+2 * 6e-4,2)
###Output
_____no_output_____
###Markdown
Project- Write a loan calculator program in Python: the input is the monthly payment (monthlyPayment) and the output is the total repayment (totalpayment) Homework- 1
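For the project described above, one possible sketch is given below. It is only an illustration using the standard amortization formula, and it takes the loan amount, annual rate and number of years as inputs instead of the monthly payment (an assumption, not the exact spec); the homework solutions follow afterwards.
```
# hypothetical loan calculator sketch
loanAmount, annualRate, years = eval(input('loan amount, annual rate (%), years: '))
monthlyRate = annualRate / 1200
numberOfMonths = years * 12
monthlyPayment = loanAmount * monthlyRate / (1 - (1 + monthlyRate) ** (-numberOfMonths))
totalPayment = monthlyPayment * numberOfMonths
print('monthly payment:', round(monthlyPayment, 2))
print('total payment:  ', round(totalPayment, 2))
```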
###Code
degree = eval(input('输入摄氏度'))
fah = (9/5)* degree + 32
print(str(degree)+'摄氏度等于'+str(fah)+'华氏度')
###Output
输入摄氏度43
43摄氏度等于109.4华氏度
###Markdown
- 2
###Code
radius,height =eval(input('请输入圆柱的半径和高'))
area = radius**2*3.14
volume=area*height
print('圆柱的面积为'+str(area)+'\n'+'圆柱的体积为'+str(volume))
###Output
请输入圆柱的半径和高5.5,12
圆柱的面积为94.985
圆柱的体积为1139.82
###Markdown
- 3
###Code
feet = eval(input('输入英尺数'))
meter = feet*305e-3
print(str(feet)+'英尺等于'+str(meter)+'米')
###Output
输入英尺数16.5
16.5英尺等于5.0325米
###Markdown
- 4
###Code
water = eval(input('请输入水的重量(kg)'))
initial_tem=eval(input('请输入初始温度'))
final_tem =eval(input('请输入最终温度'))
Q=water*(final_tem-initial_tem) *4184
print('将'+str(water)+'千克的水加热需要的能量是'+str(Q)+'焦耳')
###Output
请输入水的重量(kg)55.5
请输入初始温度3.5
请输入最终温度10.5
将55.5千克的水加热需要的能量是1625484.0焦耳
###Markdown
- 5
###Code
差额=eval(input('请输入差额:'))
年利率=eval(input('请输入年利率:'))
利息=round(差额*(年利率/1200),3)
print('月供利息为'+str(利息))
###Output
请输入差额:1000
请输入年利率:3.5
月供利息为2.917
###Markdown
- 6
###Code
init_speed=eval(input('初速度(m/s):'))
final_speed=eval(input('末速度(m/s):'))
times=eval(input('时间:'))
a=round((final_speed-init_speed)/times,4)
print('平均加速度为'+str(a))
###Output
初速度(m/s):5.5
末速度(m/s):50.9
时间:4.5
平均加速度为10.0889
###Markdown
- 7 进阶
###Code
money=eval(input('每月存储额度:'))
summ=0
i=1
while i<=6:
summ=(summ+money)*(1+0.05/12)
i=i+1
print('六个月后存款为',round(summ,2))
###Output
每月存储额度:100
六个月后存款为 608.81
###Markdown
- 8 进阶
###Code
number=eval(input('输入0-1000数字:'))
num_1=number%10
num_2=(number//10)%10
num_3=number//100
summ=num_1+num_2+num_3
print(str(summ))
###Output
输入0-1000数字:999
27
###Markdown
Basic program design- For all code input, please use an English input method
###Code
print('hello world!')
###Output
hello world!
###Markdown
Writing a simple program- Circle area formula: area = radius \* radius \* 3.1415 In Python there is no need to declare data types Reading input from the console- input reads whatever is typed as a string- eval evaluates the input as an expression
###Code
radius=int(input('请输入一个半径'))
area=radius * radius * 3.1415
print(area)
bianchang=eval(input('>>'))
area=bianchang*bianchang
print(area)
###Output
>>5
25
###Markdown
- In Jupyter, pressing shift + tab pops up the documentation Rules for naming variables- made up of letters, digits and underscores- cannot start with a digit \*- an identifier cannot be a keyword (this can actually be forced, but it is extremely poor practice for code style)- can be of any length- camelCase naming Variables, assignment statements and assignment expressions- variable: informally, a quantity that can change- x = 2 \* x + 1 is an equation in mathematics, but in a programming language it is an expression- test = test + 1 \* a variable must already have a value before it appears on the right of an assignment
###Code
radius=eval(input('>>'))
radius=radius+radius
print(radius)
###Output
>>10
20
###Markdown
Simultaneous assignment var1, var2, var3... = exp1, exp2, exp3...
###Code
小红,小兰='男','女'
print(小红,小兰)
###Output
男 女
###Markdown
Defining constants- constant: an identifier for a fixed value, suited to values that are used many times, e.g. PI- note: in other, lower-level languages a constant cannot be changed once defined, but in Python everything is an object, so a constant can still be changed Numeric data types and operators- Python has two numeric types (int and float) that support addition, subtraction, multiplication, division, modulo and exponentiation The operators / (division), // (floor division), ** (power) The operator % EP:- What is 25/4, and how would you rewrite it to get an integer result- Read a number and determine whether it is odd or even- Advanced: read a number of seconds and write a program to convert it into minutes and seconds, e.g. 500 seconds equals 8 minutes 20 seconds- Advanced: if today is Saturday, what day of the week will it be 10 days from now? Hint: day 0 of each week is Sunday
###Code
print(25/4)
print(25//4)
num=eval(input('输入一个数字'))
if num%2==0:
print('偶数')
else :
print ('奇数')
miao=eval(input('输入秒数'))
fen=miao//60
fenm=miao%60
print(str(fen)+'分',str(fenm)+'秒')
weekend = eval (input('>>'))
res = (weekend + 10) % 7
print(str(weekend)+'在十天之后是星期'+str(res))
###Output
_____no_output_____
###Markdown
Scientific notation- 1.234e+2- 1.234e-2 Evaluating expressions and operator precedence
###Code
x=10
y=6
a=0
b=1
c=1
p1=(3+4*x)/5
p2=10*(y-5)*(a+b+c)/x
p3=4/x+(9+x)/y
p=p1-p2+p3*9
print(p)
###Output
38.699999999999996
###Markdown
Augmented assignment operators
###Code
x = 1
x += 1  # augmented assignment: x = x + 1
###Output
_____no_output_____
###Markdown
Type conversion- float -> int- rounding with round
###Code
round(3.1415926,2) # 要四舍五入的值,保留位数
###Output
_____no_output_____
###Markdown
EP:- If the annual business tax rate is 0.06%, how much tax is due on an annual income of 197.55e+2? (keep 2 decimal places in the result)- Scientific notation must be used
###Code
round(6e-4*197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- Write a loan calculator program in Python: the input is the monthly payment (monthlyPayment) and the output is the total repayment (totalpayment)
###Code
贷款数,月利率,年限=eval(input('money,month rate,year'))
月供=(贷款数*月利率)/(1-(1/(1+月利率)**(年限*12)))
总还贷数=月供*年限*12
print(总还贷数)
###Output
_____no_output_____
###Markdown
Homework- 1
###Code
Celsius=eval(input('请输入摄氏温度'))
Fahrenheit=(9/5)*Celsius+32
print(str(Celsius)+'摄氏温度转为华氏温度为:'+str(Fahrenheit))
###Output
请输入摄氏温度43
43摄氏温度转为华氏温度为:109.4
###Markdown
- 2
###Code
radius,length=eval(input('>>'))
area=radius * radius * 3.1415926
volum=area*length
print(area)
print(volum)
###Output
_____no_output_____
###Markdown
- 3
###Code
ying=eval(input('>>'))
M=ying*0.305
print(M)
###Output
>>16.5
5.0325
###Markdown
- 4
###Code
M=eval(input('kilograms:'))
initialTemperature=eval(input('initialTemperature'))
finaTemperature=eval(input('finaTemperature'))
Q=M*(finaTemperature-initialTemperature)*4184
print('The energy needed is:'+str(Q))
###Output
kilograms:55.5
initialTemperature3.5
finaTemperature10.5
The energy needed is:1625484.0
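###Markdown
The heating calculation is Q = m * c * (T_final - T_initial) with c = 4184 J/(kg*C) for water. A small sketch that makes the constant explicit (function and parameter names are illustrative):
###Code
def heating_energy(mass_kg, initial_temp, final_temp, c=4184):
    # Q = m * c * (T_final - T_initial); c defaults to water's specific heat in J/(kg*C)
    return mass_kg * c * (final_temp - initial_temp)
# heating_energy(55.5, 3.5, 10.5) -> 1625484.0
###Output
_____no_output_____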
###Markdown
- 5
###Code
ce,nl=eval(input('>>'))
lx=ce*(nl/1200)
print('利息:'+str(lx))
###Output
>>1000,3.5
利息2.916666666666667
###Markdown
- 6
###Code
v0,v1,t=eval(input('v0,v1,t'))
a=(v1-v0)/t
print(a)
###Output
v0,v1,t5.5,50.9,4.5
10.088888888888889
###Markdown
- 7 进阶
###Code
a=eval(input('>>'))
i=0
lv=1+0.00417
b=0
while i<=5:
b=(a+b)*lv
i +=1
round(b,2)
###Output
>>100
###Markdown
- 8 进阶
###Code
s=eval(input('请输入数字:'))
a=s//100
b=s//10%10
c=s%10
end=a+b+c
print(end)
###Output
请输入数字:999
27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello world')
###Output
hello world
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius = 2.0 #在python中不需要定义数据类型
area = radius * radius * 3.1415
print(area)
###Output
78.53750000000001
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
radius = int(input('请输入一个半径')) #求圆的面积
area = radius * radius * 3.1415
print(area)
length = int (input('请输入一个边长')) #求正方形的面积 int可以换成eval更方便,不需要整型定义int 然后浮点型定义float 这样太麻烦,eval都可以识别
area = length * length
print(area)
import os
input_ = input('今天好开心')
os.system('calc')
###Output
今天好开心1*2
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值
###Code
radius = eval(input('>>'))
radius = radius + radius
print(radius)
###Output
>>10
20
###Markdown
同时赋值var1, var2,var3... = exp1,exp2,exp3...
###Code
小明,小红,小蓝 = '男' , '女' , '不男不女'
print(小明,小红,小蓝)
height,width = eval(input('>>'))
area1_ = height * width #area之前定义过,就不能用了
print(area1_)
###Output
>>2,2
4
###Markdown
定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、**
###Code
10 / 10 #10除10
10 // 10 #10整除10
10 ** 2 #取幂
###Output
_____no_output_____
###Markdown
运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25 // 4 #或者int (25/4)
num = eval(input('>>'))#输入一个数字判断是奇数还是偶数
if num % 2 == 0:
print('输入的数字'+str(num)+'是偶数')
else:
print('输入的数字'+str(num)+'是奇数')
times = eval(input('>>'))#输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒
mins = times // 60
times = times % 60
print(str(mins) + '分',str(times) +'秒')
weekend = eval(input('>>'))#进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
res = (weekend + 10) % 7
print('今天是周'+str(weekend)+',10天之后是星期'+str(res))
###Output
>>5
今天是周5,10天之后是星期1
###Markdown
科学计数法- 1.234e+2- 1.234e-2
###Code
1.234e+2
1.234e-2
###Output
_____no_output_____
###Markdown
计算表达式和运算优先级
###Code
x = 10
y = 6
a = 0
b = 1
c = 1
sum=(3+4*x)/5-10*(y-5)*(a+b+c)/x+9*(4/x+(9+x)/y)
print(sum)
###Output
38.699999999999996
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(3.14)#四舍五入用round
round(3.1415926,2)#要四舍五入的值,第二个保留位数
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round((0.06e-2) * 197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款数,月供,月利率,年限=eval(input('money,month,rate,years'))
月供 = (贷款数 * 月利率) / (1 - (1 / (1 + 月利率)**(年限 * 12)))
总还款数 = 月供 * 年限 * 12
print(总还款数)#输入的时候记得用逗号隔开
###Output
money,month,rate,years100,10,2,2
4800.000000016996
###Markdown
Homework- 1
###Code
celsius = eval(input('请输入一个摄氏温度:')) #读取一个摄氏温度
fahrenheit = (9 / 5) * celsius + 32
print(str(celsius)+'摄氏度是'+str(fahrenheit)+'华氏度')
###Output
请输入一个摄氏温度:43
43摄氏度是109.4华氏度
###Markdown
- 2
###Code
radius,height= eval(input('请输入一个圆柱体的半径和高:'))
area = radius * radius * 3.14
volume = area * height
print('此圆柱体的面积是'+str(area))
print('此圆柱体的体积是'+str(volume))
###Output
请输入一个圆柱体的半径和高:5.5,12
此圆柱体的面积是94.985
此圆柱体的体积是1139.82
###Markdown
- 3
###Code
feet = eval(input('请输入一个英尺数:'))
meters = 0.305 * feet
print(str(feet)+'英尺是'+str(meters)+'米')
###Output
请输入一个英尺数:16.5
16.5英尺是5.0325米
###Markdown
- 4
###Code
M = eval(input('请输入一个以千克计算的水量:'))
initialTemperature = eval(input('请输入一个初始温度:'))
finalTemperature = eval(input('请输入一个最终温度:'))
Q = M * (finalTemperature - initialTemperature) * 4184
print('所需能量为:'+str(Q))
###Output
请输入一个以千克计算的水量:55.5
请输入一个初始温度:3.5
请输入一个最终温度:10.5
所需能量为:1625484.0
###Markdown
- 5
###Code
balance,interestRate = eval(input('Enter balance and interest rate:'))
interest = balance * (interestRate / 1200)
print('The interest is '+str(interest))
###Output
Enter balance and interest rate:1000,3.5
The interest is 2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval(input('Enter v0, v1, and t: '))
a = (v1-v0) / t
print('The average acceleration is '+str(a))
###Output
Enter v0, v1, and t: 5.5,50.9,4.5
The average acceleration is 10.088888888888889
###Markdown
- 7 进阶
###Code
amount = eval (input('Enter the monthly saving amount:'))
i = 0
account = 0
for num in range (0,6):
account = (amount + account) * ( 1 + 0.05/12 )
i += 1
else:
print ('After the sixth month, the account value is '+str(account))
###Output
Enter the monthly saving amount:100
After the sixth month, the account value is 608.811017705596
###Markdown
- 8 进阶
###Code
number = eval(input('Enter a number between 0 and 1000: '))
first = number % 10
second = number // 10 % 10
third = number // 100
sum = first + second + third
print('The sum of the digits is: '+str(sum))
###Output
Enter a number between 0 and 1000: 999
The sum of the digits is: 27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法 编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
bianchang = int(input('>>'))
area=bianchang*bianchang
print(area)
###Output
_____no_output_____
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
num = int(input('>>'))
if num%2==0:
print('输入数字'+str(num)+'是偶数')
else:
print('奇数')
s = int(input('>>'))
h1=s//60
h2=s%60
print(str(h1)+'分',str(h2)+'秒')
###Output
_____no_output_____
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级
###Code
x = 10
y = 6
a = 0
b = 1
c = 1
p1 = 4/x+(9+x)/y
p2 = 10*(y-5)*(a+b+c)
p3 = (3+4*x)/5
p = p3-p2/x+9*p1
print(p)
###Output
_____no_output_____
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法 Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval(input('>>'))
fahrenheit = (9/5)*celsius+32
print('将摄氏温度'+str(celsius)+'转换成华氏温度'+str(fahrenheit))
###Output
>>38
将摄氏温度38转换成华氏温度100.4
###Markdown
- 2
###Code
radius = eval(input('半径:'))
length =eval(input('高:'))
area = radius*radius*3.1415926
volume=area*length
print('圆柱体底面积是:'+str(area)+',体积是:'+str(volume))
###Output
半径:5.5
高:12
圆柱体底面积是:95.03317615,体积是:1140.3981138
###Markdown
- 3
###Code
yingchi =eval(input('英尺:'))
mishu = yingchi*3.05e-1
print(str(yingchi)+'英尺转换成米数为'+str(mishu))
###Output
英尺:16.5
16.5英尺转换成米数为5.0325
###Markdown
- 4
###Code
M =eval(input('水量:'))
initialTemperature = eval(input('初始温度:'))
finalTemperature = eval(input('最终温度:'))
Q = (M*(finalTemperature - initialTemperature)*4184)
print('将水加热所需能量'+str(Q))
###Output
水量:55.5
初始温度:3.5
最终温度:10.5
将水加热所需能量1625484.0
###Markdown
- 5
###Code
chae = eval(input('差额:'))
nianlilv = eval(input('年利率:'))
interest = round(chae*(nianlilv/1200),5)
print('下月要付的利息为'+str(interest))
###Output
差额:3.5
年利率:1000
下月要付的利息为2.91667
###Markdown
- 6
###Code
v0 = eval(input('初始速度:'))
v1 = eval(input('末速度:'))
t = eval(input('时间:'))
a=(v1-v0)/t
print('平均加速度为'+str(a))
###Output
初始速度:5.5
末速度:50.9
时间:4.5
平均加速度为10.088888888888889
###Markdown
- 7 进阶
###Code
number= eval(input('>>'))
m1=number*(1+4.17e-3)
m2=(number+m1)*(1+4.17e-3)
m3=(number+m2)*(1+4.17e-3)
m4=(number+m3)*(1+4.17e-3)
m5=(number+m4)*(1+4.17e-3)
m6=(number+m5)*(1+4.17e-3)
print('六个月以后的账户总额为'+str(m6))
number= eval(input('>>'))
i=0
b=0
l=1+4.17e-3
while i<=5:
b=(number+b)*l
i+=1
print('六个月以后的账户总额为'+str(b))
###Output
>>100
六个月以后的账户总额为608.8181155768638
###Markdown
- 8 进阶
###Code
number= eval(input('输入0~1000之间的数:'))
numbera=number%10
numberb=number//10
numberc=numberb%10
numberd=numberb//10
numbere=numberd%10
sum=numbera+numberc+numbere
print('各位数字之和'+str(sum))
###Output
输入0~1000之间的数:999
各位数字之和27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello word')
###Output
hello word
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415eval 自己识别数字类型
###Code
radius = eval(input('输入半径'))
area = radius * radius * 3.1415
print(area)
###Output
输入半径5
78.53750000000001
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25/4
25//4
shu = eval(input('输入一个数'))
if shu%2 == 0:
print('输入的数字'+str(shu)+'是偶数')
else:
print('输入的数字'+str(shu)+'是奇数')
miao = eval(input('输入一个秒数'))
fen = miao // 60
miao1=miao % 60
print(fen,miao1)
weekend=eval(input("输入星期"))
res=(weekend+10)%7
print(str(weekend)+"在十天之后是"+str(res))
###Output
>>1
1在十天之后是4
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级
###Code
x,y,a,b,c = 10,6,0,1,1
jieguo1 =( 3 + 4 * x ) / 5
jieguo2 =(10 * ( y - 5 ) * ( a + b + c )) / x
jieguo3 =9 * ( 4 / x + ( 9 + x ) / y )
jieguo =jieguo1 - jieguo2 + jieguo3
print(jieguo)
###Output
38.699999999999996
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(197.55e+2 * 6e-4,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款数,月利率,年限=eval(input('money,month rate,years'))
月供 = (贷款数 * 月利率)/(1 - (1 / (1+月利率)** (年限 * 12)))
总还款数=月供*年限*12
print(总还款数)
###Output
money,month rate,years1000,0.1,5
6019.770553530245
###Markdown
Homework- 1
###Code
celsius = eval(input('输入摄氏温度'))
fahrenheit = ( 9 / 5 ) * celsius + 32
print(fahrenheit)
###Output
输入摄氏温度43
109.4
###Markdown
- 2
###Code
radius,length = eval(input('输入半径和高'))
area = radius * radius * 3.1415926
volume = area * length
print(round(area,4),round(volume,1))
###Output
输入半径和高5.5,12
95.0332 1140.4
###Markdown
- 3
###Code
vaule= eval(input('输入英尺数'))
meters=vaule*0.305
print(meters)
###Output
输入英尺数16.5
5.0325
###Markdown
- 4
###Code
amount,initial,final = eval(input('输入水量,初始温度,最终温度'))
reliang=amount*(final - initial)*4184
print(reliang)
###Output
输入水量,初始温度,最终温度55.5,3.5,10.5
1625484.0
###Markdown
- 5
###Code
balance,interest = eval(input('输入差额和年利率'))
lixi=balance*(interest/1200)
print(round(lixi,5))
###Output
输入差额和年利率1000,3.5
2.91667
###Markdown
- 6
###Code
v0,v1,t = eval(input('输入初始速度,末速度,时间'))
a=(v1-v0)/t
print(round(a,4))
###Output
输入初始速度,末速度,时间5.5,50.9,4.5
10.0889
###Markdown
- 7 进阶
###Code
monthly = eval(input('输入金额'))
value1=(1+0.00417)*monthly
value2=(1+0.00417)*(monthly+value1)
value3=(1+0.00417)*(monthly+value2)
value4=(1+0.00417)*(monthly+value3)
value5=(1+0.00417)*(monthly+value4)
value6=(1+0.00417)*(monthly+value5)
print(round(value6,2))
monthly = eval(input('输入金额'))
i=0
total=0
while i<=5:
    total=(1+0.00417)*(monthly+total)
    i+=1
print(round(total,2))
###Output
_____no_output_____
###Markdown
- 8 进阶
###Code
number = eval(input('输入数字'))
a= number % 10
b= number // 100
c= number // 10 %10
d= a+b+c
print(d)
###Output
输入数字999
27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法 编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius =int(input('请输入园的边长'))
area = radius * radius * 3.1415
print(area)
###Output
请输入园的边长5
78.53750000000001
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
radius =int(input('请输入正方形的边长'))
area = radius * radius
print(area)
###Output
请输入正方形的边长5
25
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25 //4
int(25/4)
num = eval(input('>>'))
if num % 2 ==0:
print(str(num)+'是偶数')
else:
print('奇数')
times = eval(input('>>'))
mins = times // 60
times_ = times % 60
print(str(mins)+'分',str(times_)+'秒')
weekend = eval(input('>>'))
res = (weekend + 10) % 7
print(str(weekend)+'在10天之后是星期'+str(res))
###Output
>>10
10在10天之后是星期6
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级
###Code
x = 10
y = 6
a = 0
b = 1
c = 1
q = ( 3 + 4 * x ) / 5
w = ( 10 * ( y - 5 ) * ( a + b + c ) ) / x
e = 9 * ( 4 / x + ( 9 + x ) / y )
r = q - w + e
print(q,w,e,r)
###Output
8.6 2.0 32.099999999999994 38.699999999999996
###Markdown
增强型赋值运算
###Code
a = 1
a += 1 #a = a+1
print(a)
###Output
2
###Markdown
类型转换- float -> int- 四舍五入 round
###Code
round(3.1415926,5) #要四舍五入的值,保留位数
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(6e-4 * 197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款率,月利率,年限 = eval(input('>>'))
月供 = (贷款率 * 月利率) / (1-(1/(1+月利率)**(年限 * 12)))
总还款数 = 月供 * 年限 *12
print(总还款数)
###Output
>>1,2,1
24.000045160319132
###Markdown
Homework- 1
###Code
celsius = eval(input('Enter a degree inCelsius:'))
fahrenheit = (9 / 5) * celsius + 32
print(str(celsius) + ' Celsius is ' + str(fahrenheit) + ' Fahrenheit')
###Output
Enter a degree inCelsius:43
43 Celsius is 109.4 Fahrenheit
###Markdown
- 2
###Code
radius,length = eval(input('Enter the radius and length of a cylinder:'))
area = radius * radius * 3.14
volume = area * length
print( 'The area is ' + str(area))
print( 'The volume is ' + str(volume))
###Output
Enter the radius and length of a cylinder:5.5,12
The area is 94.985
The volume is 1139.82
###Markdown
- 3
###Code
feet = eval(input('Enter a value for feet:'))
meters = feet * 0.305
print( str(feet) + ' feet is ' + str(meters) + ' meters')
###Output
_____no_output_____
###Markdown
- 4
###Code
M = eval(input('Enter the amount of water in kilograms:'))
initialtemperature = eval(input('Enter the initial temperature:'))
finaltemperature = eval(input('Enter the final temperature:'))
Q = M * (finaltemperature - initialtemperature) * 4184
print( 'The energy needed is ' + str(Q) )
###Output
Enter the amount of water in kilograms:55.5
Enter the initial temperature:3.5
Enter the final temperature:10.5
The energy needed is 1625484.0
###Markdown
- 5
###Code
差额,年利率 = eval(input('Enter balance and interest rate(e.g., 3 for 3%):'))
利息 = 差额 * (年利率 / 1200)
print( 'The interest is ' + str(利息) )
###Output
Enter balance and interest rate(e.g., 3 for 3%):1000,3.5
The interest is 2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval(input('Enter v0,v1,and t:'))
a = (v1 - v0 ) / t
print('The average acceleration is ' + str(a))
###Output
Enter v0,v1,and t:5.5,50.9,4.5
The average acceleration is 10.088888888888889
###Markdown
- 7 进阶 - 8 进阶
###Code
i = eval(input('Enter a number between 0 and 1000:'))
digitSum = i % 10 + i // 10 % 10 + i // 100
print('The sum of the digits is ' + str(digitSum))
###Output
Enter a number between 0 and 1000:932
The sum of the digits is 14
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
radius = 10
area = radius*radius*3.1425
print(area)
###Output
314.25
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
import os
input_ = input('今天天气不错')
os.system('calc')
###Output
_____no_output_____
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
bianchang=int(input('请输入一个边长:'))
area = bianchang*bianchang
print(area)
###Output
请输入一个边长:4
16
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25/4
num=eval(input('输入一个数:'))
if num%2==0:
print(str(num) + '是偶数')
else :
print(str(num) + '是奇数')
seconds = eval(input('输入一个秒数:'))
min1=seconds//60
seconds_=seconds%60
print(str(min1) + '分'+ str(seconds_)+ '秒')
weekend = eval(input('今天是星期:'))
num=(weekend + 10) % 7
print('在10天后是星期'+str(num))
x,y,a,b,c=10,6,0,1,1
sum1=(3+4*x)/5
sum2=(10*(y-5)*(a+b+c))/x
sum3=9*(4/x+(9+x)/y)
sum_=sum1-sum2+sum3
print(sum_)
###Output
38.699999999999996
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级 增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(197.55e+2 * 6e-4,2)
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法 Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval(input('请输入一个摄氏温度:'))
fahrenheit = (9/5)*celsius +32
print(str(celsius)+'摄氏度,转换为华氏温度为:'+str(fahrenheit))
###Output
请输入一个摄氏温度:32
32摄氏度,转换为华氏温度为:89.6
###Markdown
- 2
###Code
radius,length = eval(input('请分别输入圆柱的半径和高:'))
area = radius*radius*3.14
volume=area*length
print('圆柱的底面积为:'+str(area)+'圆柱的体积为:'+str(volume))
###Output
请分别输入圆柱的半径和高:5,10
圆柱的底面积为:78.5圆柱的体积为:785.0
###Markdown
- 3
###Code
feet = eval(input('请输入英尺长度:'))
meter=0.305*feet
print(str(feet)+'英尺等于'+str(meter)+'米')
###Output
请输入英尺长度:30
30英尺等于9.15米
###Markdown
- 4
###Code
weight = eval(input('请依次输入水的质量(KG):'))
initialTemperature = eval(input('请输入水的初始温度:'))
finalTemperature = eval(input('请输入水的最终温度:'))
Q = weight * (finalTemperature - initialTemperature) * 4184
print('所需的能量(单位焦耳)是'+str(Q))
###Output
请依次输入水的质量(KG):3
请输入水的初始温度:10
请输入水的最终温度:100
所需的能量(单位焦耳)是1129680
###Markdown
- 5
###Code
差额,年利率 = eval(input('请依次输入差额和年利率:'))
利息 = 差额 * (年利率/1200)
print('月供利息为:'+str(利息))
###Output
请依次输入差额和年利率:1000,3.5
月供利息为:2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval(input('请依次输入初速度(m/s)、末速度(m/s)和加速时间s:'))
a = (v1-v0)/t
print('加速度为(m/s):'+str(a))
###Output
请依次输入初速度(m/s)、末速度(m/s)和加速时间s:5.5,50.9,4.5
加速度为(m/s):10.088888888888889
###Markdown
- 7 进阶
###Code
存款 = eval(input('请输入每月存入金额(单位元):'))
i = 1
sum_ = 0
while i <= 6:
sum_=(sum_+存款)*(1+(5e-2)/12)
i += 1
print('6月后存款为:'+str(sum_))
###Output
请输入每月存入金额(单位元):100
6月后存款为:608.811017705596
###Markdown
- 8 进阶
###Code
num1 = int (input('请输入一个0到1000的整数:'))
if 1<num1<1000:
bai=num1//100
shi=num1//10-bai*10
ge=num1%10
sum_=bai+shi+ge
print('数字'+str(num1)+'各个位数之和为:'+ str(sum_))
else :
print('数字超出范围!!!')
###Output
请输入一个0到1000的整数:999
数字999各个位数之和为:27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
radius = int(input('>>'))
radius = radius + radius
print(radius)
int(25/4)
count = eval (input('>>'))
if count%2==0:
    print('输入的数字'+str(count)+'是偶数')
else:
    print('输入的数字'+str(count)+'是奇数')
###Output
_____no_output_____
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
times=eval (input('>>'))
mins = times // 60
times=times % 60
print(str(mins)+'分'+str(times)+'秒')
weekend = eval (input('>>'))
res = (weekend + 10)%7
print(str(weekend)+'在十天之后是星期'+str(res))
x = 10
y = 6
a = 0
b = 1
c =1
part1 = (3 + 4 * x) / 5
part2 = (10 * (y - 5) * (a + b + c )) / x
part3 = 9 * ((4 / x) + (9 + x) / y )
p= part1 - part2 + part3
print(p)
###Output
38.699999999999996
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天 科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级
###Code
round(0.06e-2*197.55e+2,2)
贷款数,月利率,年限=eval (input ('money,month rate,year'))
月供=(贷款数*月利率)/(1-(1/(1+月利率)**(年限*12)))
总还款数=月供*年限*12
print(总还款数)
###Output
_____no_output_____
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法 Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval (input('>>'))
fahrenheit = (9 / 5) * celsius + 32
print(str(celsius) + ' celsius is ' + str(fahrenheit) + ' fahrenheit ')
###Output
>>2
2 celsius is 35.6 fahrenheit
###Markdown
- 2
###Code
radius,length= eval (input('>>'))
area = 3.1415926 * radius * radius
volume = area * length
print('The area is ' + str(area) + ' The volume is ' + str(volume))
###Output
>>5.5,12
The area is 95.03317615 The volume is 1140.3981138
###Markdown
- 3
###Code
feet= eval (input('>>'))
meters = 0.305 * feet
print( str(feet) + ' feet is ' + str(meters) + ' meters ' )
###Output
>>16.5
16.5 feet is 5.0325 meters
###Markdown
- 4
###Code
M= eval (input('>>'))
initialTemperature= eval (input('>>'))
finalTemperature= eval (input('>>'))
Q = M * (finalTemperature - initialTemperature) * 4184
print(Q)
###Output
>>55.5
>>3.5
>>10.5
1625484.0
###Markdown
- 5
###Code
chae,nian = eval(input('>>'))
y = chae * (nian / 1200)
print(y)
###Output
>>23,44
0.8433333333333334
###Markdown
- 6
###Code
v0,v1,t = eval(input('>>'))
a = (v1 - v0 ) / t
print(a)
###Output
>>5.5,50.9,4.5
10.088888888888889
###Markdown
- 7 进阶
###Code
account = eval(input('>>'))
i = 0
month=1+0.00417
b=0
while i<=5:
b=(account+b)*month
i +=1
print(b)
###Output
>>100
608.8181155768638
###Markdown
- 8 进阶
###Code
N = eval (input('>>'))
N1 = N % 10
N2 = N// 100
N3 = (N % 100) // 10
N4= N1+ N2 + N3
print(N4)
###Output
>>999
27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
a=int(input('输入长和宽'))
area=a*a
print(area)
###Output
输入长和宽5
25
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415 在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25 // 4
times=eval(input('输入秒数'))
print(times//60,'分',times%60,'秒')
num=eval(input('输入一个数'))
print('偶数' if num%2==0 else '奇数')
x=10
y=6
a=0
b=1
c=1
part1=(3+4*x)/5
part2=10*(y-5)*(a+b+c)/x
part3=9*(4/x+(9+x)/y)
sum=part1-part2+part3
print(sum)
###Output
38.699999999999996
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级 增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(3.14159625)
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(6e-4*197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius=eval(input('>>'))
Fahrenheit=(9/5)*celsius+32
print(str( Fahrenheit))
###Output
>>43
109.4
###Markdown
- 2
###Code
radius,length=eval(input('>>'))
area=radius*radius*3.14
volume=area*length
print(area,volume)
###Output
>>5.5,12
94.985 1139.82
###Markdown
- 3
###Code
feet=eval(input('>>'))
mi=feet*0.305
print(str(mi))
###Output
>>16.5
5.0325
###Markdown
- 4
###Code
M,it,ft=eval(input('请用户输入水量以及水的初始温度和最终温度'))
Q=M*(ft-it)*4184
print(Q)
###Output
请用户输入水量以及水的初始温度和最终温度55.5,3.5,10.5
1625484.0
###Markdown
- 5
###Code
c,n=eval(input('>>'))
s=c*(n/1200)
print(s)
###Output
>>1000,3.5
2.916666666666667
###Markdown
- 6 - 7 进阶
###Code
m=eval(input('>>'))
d=m*(1+0.00417)
for x in range(5):
    d=(d+m)*(1+0.00417)
print(d)
###Output
>>100
608.8181155768638
###Markdown
- 8 进阶
###Code
num=eval(input('输入一个0到1000之间的整数'))
a=num%10
b=num//100
sum=(num-100*b-a)/10
print(int(a+b+sum))
###Output
输入一个0到1000之间的整数999
27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello world')
###Output
hello world
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius = int(input('请输入一个半径:'))
area = radius * radius * 3.1415
print(area)
###Output
请输入一个半径:2
12.566
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25/4
25//4
num = int(input('请输入一个整数:'))
if num%2==0:
print('该数为偶数')
else:
print('该数为奇数')
s = eval(input('请输入一个秒数:'))
min = s//60
s1 = s%60
print (str(min)+'分'+str(s1)+'秒')
###Output
请输入一个秒数:500
8分20秒
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级 增强型赋值运算 类型转换- float -> int- 四舍五入 round EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法 Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval(input('请输入一个温度:'))
fahrenheit = (9/5)*celsius+32
print (fahrenheit)
###Output
请输入一个温度:43
109.4
###Markdown
- 2
###Code
import math
radius = eval(input('请输入一个半径:'))
length = eval(input('请输入一个高:'))
area = radius*radius*math.pi
volume = area*length
print (round(area,1))
print (round(volume,1))
###Output
请输入一个半径:5.5
请输入一个高:12
95.0
1140.4
###Markdown
- 3
###Code
feet = eval(input('请输入一个英尺数:'))
meters = feet*0.305
print (meters)
###Output
请输入一个英尺数:16.5
5.0325
###Markdown
- 4
###Code
M = eval(input('请输入以kg为单位的水的质量:'))
initialTemperature = eval(input('请输入水的初始温度:'))
finalTemperature = eval(input('请输入水的最终温度:'))
Q = M*(finalTemperature-initialTemperature)*4184
print (Q)
###Output
请输入以kg为单位的水的质量:55.5
请输入水的初始温度:3.5
请输入水的最终温度:10.5
1625484.0
###Markdown
- 5
###Code
balance = eval(input('请输入差额:'))
rate = eval(input('请输入年利率:'))
interest = (balance*(rate/1200))
print (interest)
###Output
请输入差额:1000
请输入年利率:3.5
2.916666666666667
###Markdown
- 6
###Code
v0 = eval(input('请输入初速度:'))
v1 = eval(input('请输入末速度:'))
t = eval(input('以秒为单位速度变化所占用的时间:'))
a = (v1-v0)/t
print (a)
###Output
请输入初速度:5.5
请输入末速度:50.9
以秒为单位速度变化所占用的时间:4.5
10.088888888888889
###Markdown
- 7 进阶
###Code
money = eval(input('请输入每月的存钱数:'))
one = money*(1+0.00417)
two = (money+one)*(1+0.00417)
three = (money+two)*(1+0.00417)
four = (money+three)*(1+0.00417)
five = (money+four)*(1+0.00417)
six = (money+five)*(1+0.00417)
print (round(six,2))
###Output
请输入每月的存钱数:100
608.82
###Markdown
- 8 进阶
###Code
num = eval(input('请输入一个0到1000的整数:'))
a = num%10
b = num//10
c = b%10
d = b//10
print (a+c+d)
###Output
请输入一个0到1000的整数:152
8
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法 编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415 在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天 科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级 增强型赋值运算 类型转换- float -> int- 四舍五入 round EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法 Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval(input('输入摄氏温度:'))
fahrenheit = (9/5)*celsius+32
print('华氏温度:'+str(fahrenheit))
###Output
输入摄氏温度:30
华氏温度:86.0
###Markdown
- 2
###Code
import math
radius = eval(input('请输入圆柱的半径'))
length = eval(input('请输入圆柱的高'))
area = radius * radius * math.pi
volume = area * length
print('圆柱底面积:'+str(area)+'\n'+'圆柱体积:'+str(volume))
###Output
请输入圆柱的半径5.5
请输入圆柱的高12
圆柱底面积:95.03317777109125
圆柱体积:1140.398133253095
###Markdown
- 3
###Code
feet = eval(input('输入英尺数值:'))
meters = feet * 0.305
print(str(feet)+'英尺='+str(meters)+'米')
###Output
输入英尺数值:16.5
16.5英尺=5.0325米
###Markdown
- 4
###Code
M = eval(input('水量'))
init = eval(input('水的初始温度'))
final = eval(input('水的最终温度'))
Q = M * (final - init) * 4184
print('能量'+str(Q))
###Output
水量55.5
水的初始温度3.5
水的最终温度10.5
能量1625484.0
###Markdown
- 5
###Code
差额 = eval(input('差额:'))
年利率 = eval(input('年利率'))
利息 = 差额 * (年利率/1200)
print('月供:'+str(利息))
###Output
差额:1000
年利率3.5
月供:2.916666666666667
###Markdown
- 6
###Code
v0 = eval(input('输入初始速度,单位 m/s'))
v1 = eval(input('输入末速度,单位 m/s'))
t = eval(input('输入时间,单位s'))
a = round((v1-v0)/t,4)
print('平均加速度'+str(a))
###Output
输入初始速度,单位 m/s5.5
输入末速度,单位 m/s50.9
输入时间,单位s4.5
平均加速度10.0889
###Markdown
- 7 进阶
###Code
meiyuecunkuan = eval(input('每月存款'))
yuelilv = 4.17e-3
sum=0
for i in range(1,7):
    sum=(sum+meiyuecunkuan) * (1+yuelilv)
total = round(sum,2)
print('账户总额'+str(total))
###Output
每月存款100
账户总额608.82
###Markdown
- 8 进阶
###Code
num = eval(input('输入0-1000范围内的数值'))
unit = num % 10
hundred = num // 100
ten = (num // 10) % 10
sum = unit + hundred+ ten
print('数字之和:'+str(sum))
###Output
输入0-1000范围内的数值999
数字之和:27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello word')
###Output
hello word
###Markdown
chang=int(input('请输入一个边长')) area=chang * chang print(area) 编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius=int(input('请输入半径'))
area=radius*radius*3.1415926
print(area)
chang=eval(input('出入一个边长'))
area=chang*chang
print(area)
###Output
出入一个边长5
25
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 %
###Code
# EP:
# - 25/4 多少,如果要将其转变为整数该怎么改写
# - 输入一个数字判断是奇数还是偶数
# - 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒
# - 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
print(25//4)
x=int(input('输入一个整数'))
if x%2==0:
print('偶数')
else:
print('奇数')
times=eval(input('输入秒数'))
mins=times//60
miao=times%60
print(str(mins) + '分' + str(miao) + '秒')
week = eval(input('请输入当天的星期数为:'))
weekend = (week + 10) % 7
print('今天是星期' + str(week) + ',10天以后星期' + str(weekend))
###Output
请输入当天的星期数为:5
今天是星期5,10天以后星期1
###Markdown
科学计数法- 1.234e+2- 1.234e-2
###Code
1.234e+2
1.234e-2
###Output
_____no_output_____
###Markdown
计算表达式和运算优先级
###Code
x,y,a,b,c = eval(input('请输入:'))
result = ((3+4*x)/5) - ((10*(y-5)*(a+b+c))/x) + (9+(4/x+(9+x)/y))
print('输出的结果是:' + str(result))
###Output
请输入:10,20,30,40,50
输出的结果是:-1781.0500000000002
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(3.1415926,2)
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(197.55e+2*6e-4,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款数,月供,月利率,年限=eval(input('money,month,rate,years'))
月供 = (贷款数 * 月利率) / (1 - (1 / (1 + 月利率)**(年限 * 12)))
还款数=月供*年限*12
print(还款数)
###Output
money,month,rate,years10000,10,0.1,2
26711.946324165066
###Markdown
Homework- 1
###Code
celsius=eval(input('输入摄氏温度:'))
fahrenheit=(9/5)*celsius+32
print(fahrenheit)
###Output
输入摄氏温度:23
73.4
###Markdown
- 2
###Code
height,radius=eval(input('输入高和半径'))
area=radius*radius*3.14
volume=area*height
print(area,volume)
###Output
输入高和半径2,2
12.56 25.12
###Markdown
- 3
###Code
x=eval(input('输入一个英尺数'))
mi=x*0.305
print(mi)
###Output
输入一个英尺数7
2.135
###Markdown
- 4
###Code
m,chushi,zuizhong=eval(input('输入水量、初始温度、最终温度'))
Q=m*(zuizhong-chushi)*4184
print(Q)
###Output
输入水量、初始温度、最终温度55.5,3.5,10.5
1625484.0
###Markdown
- 5
###Code
chae,nianlilv=eval(input('输入差额、年利率'))
lixi=chae*(nianlilv/1200)
print(lixi)
###Output
输入差额、年利率1000,3.5
2.916666666666667
###Markdown
- 6
###Code
v0,v1,t=eval(input('输入初速度、末速度、时间'))
a=(v1-v0)/t
print(a)
###Output
输入初速度、末速度、时间5.5,50.9,4.5
10.088888888888889
###Markdown
- 7 进阶
###Code
x = eval(input('存入金额 '))
yuefen=eval(input('月份'))
s = 0
y=0
while s<yuefen:
y=(x+y)*(1+0.00417)
s=s+1
print(y)
###Output
存入金额 100
月份6
608.8181155768638
###Markdown
- 8 进阶
###Code
def x(num):
return sum(int(i) for i in str(num) if i.isdigit())
if __name__ == '__main__':
num = input('请输入一个整数: ')
print('{} 每位数相加之和是: {}'.format(num, x(num)))
###Output
请输入一个整数: 21
21 每位数相加之和是: 3
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello world')
###Output
hello world
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415 在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
bianchang = int(input('请输入边长'))
area = bianchang * bianchang
print(area)
###Output
请输入边长2
4
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /(除)、//(整除)、**(平方) 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
int(25/4)
time=eval(input('>>')) #500//60
minute=time//60 #500%60
second=time%60
print(str(minute)+'分',str(second)+'秒')
a=eval(input('请输入一个数'))
if a%2==0:
print('这是一个偶数')
else:
print('这是一个奇数')
weekend = eval(input('星期'))
res = (weekend + 10) % 7
print('在10天之后是星期'+str(res))
###Output
星期2
在10天之后是星期5
###Markdown
科学计数法- 1.234e+2 - 1.234e-2 计算表达式和运算优先级
###Code
x,y,a,b,c = eval(input('>>'))
part1,part2,part3 = (3+4*x)/5,10*(y-5)*(a+b+c)/x,9*((4/x)+(9+x)/y)
result = part1-part2+part3
print(result)
###Output
_____no_output_____
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(3.1415,2) #要四舍五入的值,保留的小数位数
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(6e-4*197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款数,月利率,年限 = eval(input('money,month rate,years'))
月供 = (贷款数 * 月利率) / (1 - (1 / (1+月利率) ** (年限 * 12)))
总还款数 = 月供 * 年限 * 12
print(总还款数)
###Output
money,month rate,years10000,0.01,1
10661.854641401005
###Markdown
Homework- 1
###Code
celsius = eval(input('输入一个摄氏温度'))
fahrenheit = (9 / 5) * celsius + 32
print(str(celsius)+'摄氏度转换成华氏温度为'+str(fahrenheit))
###Output
输入一个摄氏温度43
43摄氏度转换成华氏温度为109.4
###Markdown
- 2
###Code
radius,length = eval(input('输入圆柱体的半径和高'))
area = radius * radius * 3.14
volume = area * length
print('圆柱体底面积是'+str(area))
print('圆柱体体积是',round(volume,1))
###Output
输入圆柱体的半径和高5.5,12
圆柱体底面积是94.985
圆柱体体积是 1139.8
###Markdown
- 3
###Code
feet = eval(input('输入英尺数'))
meter = feet * 305e-3
print(str(feet)+'英尺等于'+str(meter)+'米')
###Output
输入英尺数16.5
16.5英尺等于5.0325米
###Markdown
- 4
###Code
M = eval(input('输入按千克计的水量:'))
initalTemperature = eval(input('输入初始温度:'))
finalTemperature = eval(input('输入最终温度:'))
Q = M * (finalTemperature - initalTemperature) * 4184
print('所需能量为'+str(Q)+'焦耳')
###Output
输入按千克计的水量:55.5
输入初始温度:3.5
输入最终温度:10.5
所需能量为1625484.0焦耳
###Markdown
- 5
###Code
差额,年利率 = eval(input('输入差额和年利率:'))
利息 = 差额 * (年利率 / 1200)
print('下月要付利息:',round(利息,5))
###Output
输入差额和年利率:1000,3.5
下月要付利息: 2.91667
###Markdown
- 6
###Code
v0,v1,t = eval(input('输入初始速度、末速度和时间:'))
a = (v1 - v0) / t
print('平均加速度是:',round(a,4))
###Output
输入初始速度、末速度和时间:5.5,50.9,4.5
平均加速度是: 10.0889
###Markdown
- 7 进阶
###Code
账户总额 = eval(input('存入金额:'))
月利率 = 0.05 / 12
month1 = 账户总额 * (1 + 月利率)
month2 = (账户总额 + month1) * (1 + 月利率)
month3 = (账户总额 + month2) * (1 + 月利率)
month4 = (账户总额 + month3) * (1 + 月利率)
month5 = (账户总额 + month4) * (1 + 月利率)
month6 = (账户总额 + month5) * (1 + 月利率)
print('6个月后账户总额为:',round(month6,2))
###Output
存入金额:100
6个月后账户总额为: 608.81
###Markdown
- 8 进阶
###Code
num = eval(input('输入0-1000之间的任意数:'))
num1 = num % 10
num2 = (num // 10)%10
num3 = num //100
sum = num1 + num2 + num3
print('各位数字之和为:'+str(sum))
###Output
输入0-1000之间的任意数:999
各位数字之和为:27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法 编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415 在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3... 定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25//4
num = eval (input('>>'))
int(25/4)
n = eval( input ('>>'))
h = n // 60
s = n % 60
print(str(h) + '分' + str(s) + '秒')
###Output
_____no_output_____
###Markdown
科学计数法- 1.234e+2- 1.234e-2
###Code
x= 10
y= 6
a= 0
b= 1
c= 1
text =(3+4*x)/5
text2 =(10*(y-5)*(a+b+c))/x
text3 =9*(4/x+(9+x)/y)
m = text - text2 + text3
print(m)
###Output
_____no_output_____
###Markdown
计算表达式和运算优先级 增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round (3.14157,3)
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(6e-4*197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval (input('>>'))
fahrenheit = (9 / 5) * celsius + 32
print (fahrenheit)
###Output
>>43
109.4
###Markdown
- 2
###Code
radius,lenght = eval (input('>>'))
area = radius * radius * 3.1415926
volume = area * lenght
print ('底面积为'+str(area)+'体积为'+str(volume))
###Output
>>5.5,12
###Markdown
- 3
###Code
yingchi = eval (input('>>'))
mi = yingchi *0.305
print(str(mi)+'米')
###Output
>>16.5
5.0325米
###Markdown
- 4
###Code
M = eval (input('>>'))
initialTemperature = eval (input('>>'))
finalTemperature = eval (input('>>'))
Q = M * (finalTemperature - initialTemperature) * 4184
print(Q)
###Output
>>55.5
>>3.5
>>10.5
1625484.0
###Markdown
- 5
###Code
cha,nian = eval (input('>>'))
m = cha * (nian / 1200 )
print(m)
###Output
>>1000,3.5
2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval (input('>>'))
a = (v1 - v0) / t
print(a)
###Output
>>5.5,50.9,4.5
10.088888888888889
###Markdown
- 7 进阶
###Code
M = eval (input('>>'))
M1 = M * (1 + 0.00417)
M2 = (M + M1) * (1+ 0.00417)
M3 = (M + M2) * (1+ 0.00417)
M4 = (M + M3) * (1+ 0.00417)
M5 = (M + M4) * (1+ 0.00417)
M6 = (M + M5) * (1+ 0.00417)
print(M6)
###Output
>>100
608.8181155768638
###Markdown
- 8 进阶
###Code
M = eval (input('>>'))
M1 = M%10
M2 = M//100
M3 = (M%100)//10
M4= M1+ M2 + M3
print(M4)
###Output
>>999
27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print("hello would")
###Output
hello would
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius = 2.0
area = radius * radius * 3.1415
print(area)
print(radius)
###Output
12.566
2.0
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval - 在jupyter用shift + tab 键可以跳出解释文档
###Code
radius = int (input ("请输入一个边长"))
area = radius * radius
print (area)
###Output
请输入一个边长5
25
###Markdown
变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值
###Code
x = eval(input("请输入x"))
x = x + x
print(x)
###Output
请输入x5
10
###Markdown
同时赋值var1, var2,var3... = exp1,exp2,exp3...
###Code
chang,kuan = eval (input (">>"))
mianji = chang * kuan
print(mianji)
###Output
>>12,10
120
###Markdown
定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、** 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25//4
x = int (input ("输入一个数"))
if x % 2 == 0:
print ("输入的数字" + str(x) + "是一个偶数")
else :
print ("输入的数字" + str(x) + "是一个奇数")
miao = eval (input ("输入一个秒数"))
fen = miao // 60
miao = miao % 60
print(str(fen)+ "分" + str(miao) + "秒")
weekend = eval (input("今天是星期"))
res = (weekend + 10) % 7
print("那么10天后是星期" + str(res))
###Output
今天是星期6
那么10天后是星期2
###Markdown
科学计数法- 1.234e+2- 1.234e-2 计算表达式和运算优先级
###Code
x,y,a,b,c = 10,6,0,1,1
a1 = (3 + 4 * x )/ 5
a2 = (10 * (y - 5) * (a + b + c)) / x
a3 = 9 * (4 / x + (9 + x )/ y)
a = a1 - a2 + a3
print(a)
###Output
38.699999999999996
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(3.1415926,2)
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round(6e-4 * 197.55e+2,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
celsius = eval (input ("输入一个摄氏温度"))
fahrenheit = (9 / 5) * celsius + 32
print("转化为华氏温度为" + str(fahrenheit))
###Output
输入一个摄氏温度43
转化为华氏温度为109.4
###Markdown
- 2
###Code
radius,high = eval (input("输入半径和高为"))
import math
area = radius * radius * math.pi
volume = area * high
print("这个圆柱的底面积为" + str(area) + "这个圆柱的体积为" + str(volume))
###Output
输入半径和高为5.5,12
这个圆柱的底面积为95.03317777109125这个圆柱的体积为1140.398133253095
###Markdown
- 3
###Code
foot = eval (input ("输入英尺为"))
mishu = 0.305 * foot
print("转化成米数为" + str(mishu))
###Output
输入英尺为16.5
转化成米数为5.0325
###Markdown
- 4
###Code
initial,final,weight = eval (input("输入水的初始温度和最终温度和水的质量"))
energy = weight * (final - initial) * 4184
print( "所需的能量为" + str(energy))
###Output
输入水的初始温度和最终温度和水的质量3.5,10.5,55.5
所需的能量为1625484.0
###Markdown
- 5
###Code
difference,lilv = eval (input ("输入差额和年利率为"))
interest = difference * (lilv / 1200)
print("下个月月供的利息为" + str(interest))
###Output
输入差额和年利率为1000,3.5
下个月月供的利息为2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval (input("输入初始速度v0和末速度v1以及所占用的时间t"))
a = (v1 - v0) / t
print("平均速度是" + str(a))
###Output
输入初始速度v0和末速度v1以及所占用的时间t5.5,50.9,4.5
平均速度是10.088888888888889
###Markdown
- 7 进阶
###Code
money = eval ( input("存入"))
deposit = money * (1 + 0.00417)
for x in range (5) :
    deposit = (deposit + money) * ( 1 + 0.00417)
print(deposit)
###Output
存入100
608.8181155768638
###Markdown
- 8 进阶
###Code
x = eval ( input("输入"))
a = x % 10
y = x // 10
b = y % 10
c = x // 100
sum0 = a + b + c
print("各位数之和为" + str(sum0))
###Output
输入999
各位数之和为27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello world')
###Output
hello world
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius = 2 #在Python中,不需要定义数据类型
area = radius * radius * 3.1415
print(area)
###Output
12.566
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
radius = int (input('请输入一个值')) #求园面积
area = radius * radius * 3.1415
print(area)
lenth = eval (input('请输入长')) #求长方形面积
width = int (input('请输入宽'))
area = lenth * width
print(area)
import os #调用计算器
input_ = input('计算器')
os.system('calc')
###Output
qwe1
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3...
###Code
小米,小张 = '女','男'
print(小米)
height,width = eval(input('输入长和宽'))
area = height * width
print(area)
###Output
>>2,3
6
###Markdown
定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、**
###Code
#/除 //整除 ** 幂 % 取余
###Output
_____no_output_____
###Markdown
运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
25 // 4 #或者int(25/4),25/4 多少,如果要将其转变为整数该怎么改写
# 输入一个数字判断是奇数还是偶数
a = eval (input('请输入一个数'))
if a%2==0:
print('偶数')
else:
print('奇数')
# 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒
sum = eval (input('请输入一个秒数:'))
min = sum//60
s = sum%60
print(str(min)+'分',str(s)+'秒')
# 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
weekend = eval(input('输入今天星期数:'))
res = (weekend + 10) % 7
print('今天是周'+str(weekend)+',10天之后是星期'+str(res))
###Output
输入今天星期数:6
今天是周6,10天之后是星期2
###Markdown
科学计数法- 1.234e+2- 1.234e-2
###Code
1.234e-2
###Output
_____no_output_____
###Markdown
计算表达式和运算优先级
###Code
x=10 #运算s的值
y=6
a=0
b=1
c=1
sum=(3+4*x)/5-10*(y-5)*(a+b+c)/x+9*(4/x+(9+x)/y)
print(sum)
###Output
38.699999999999996
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(1.34) #四舍五入
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
#如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)
round(0.06e-2 * 197.55e+2 ,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款数,月利息,年限 = eval(input('money,month rate,years'))
月供 = (贷款数 * 月利息) / (1 - 1/((1+月利息)**(年限*12)))
应还款数 = 月供 * 年限 * 12
print(应还款数)
###Output
_____no_output_____
###Markdown
Homework- 1
###Code
celsius = eval (input ('请输入一个摄氏温度:'))
fahrenheit = (9 / 5) * celsius + 32
print('转化为华氏温度为:'+str(fahrenheit))
###Output
请输入一个摄氏温度:27
转化为华氏温度为:80.6
###Markdown
- 2
###Code
radius,length = eval (input ('请输入半径值和高值:'))
area = radius * radius * 3.14
volume = area * length
print ('圆柱体底面积为:'+str(area))
print ('圆柱体体积为:'+str(volume))
###Output
请输入半径值和高值:5.5,12
圆柱体底面积为:94.985
圆柱体体积为:1139.82
###Markdown
- 3
###Code
feet = eval (input ('输入将要转换的英尺值:'))
meters = feet * 0.305
print (str(feet)+'英尺'+'='+str(meters)+'米')
###Output
输入将要转换的英尺值:4
4英尺=1.22米
###Markdown
- 4
###Code
kilograms = eval (input('输入水量,单位为千克:'))
initialtemperature = eval (input('输入水初始温度:'))
finaltemperature = eval (input('输入水加热后温度:'))
Q = kilograms * ( finaltemperature - initialtemperature ) * 4184
print ('所需能量为:'+str(Q))
###Output
输入水量,单位为千克:55.5
输入水初始温度:3.5
输入水加热后温度:10.5
所需能量为:1625484.0
###Markdown
- 5
###Code
balance,interest = eval (input('请输入差额和年利率:'))
interests = balance * (interest / 1200)
print ('利息为:'+str(interests))
###Output
请输入差额和年利率:1000,3.5
利息为:2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval (input('输入初始速度、末速度和时间:'))
a = ( v1 - v0 ) / t
print('平均速度为:'+str(a))
###Output
输入初始速度、末速度和时间:5.5,50.9,4.5
平均速度为:10.088888888888889
###Markdown
- 7 进阶
###Code
amount = eval (input('每月存款数:'))
i = 0
account = 0
for num in range (0,6):
account = (amount + account) * ( 1 + 0.05/12 )
i += 1
else:
print ('每个月存'+str(amount)+'六个月后账户总额为:'+str(account))
###Output
每月存款数:100
每个月存100六个月后账户总额为:608.811017705596
###Markdown
- 8 进阶
###Code
number = eval (input('输入一个0到1000之间的数:'))
g = number % 10
s = number // 10 % 10
b = number // 100 % 10
sum = g + s + b
print ('提取各位数字相加的和为:'+str(sum))
###Output
输入一个0到1000之间的数:932
提取各位数字相加的和为:14
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print ("hello world")
###Output
hello world
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius = 2.0
area = radius * radius * 3.1415
print(area)
###Output
12.566
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
radius = int(input("请输入一个边长"))
area = radius * radius
print(area)
###Output
请输入一个边长5
25
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值
###Code
radius=eval(input(">>"))
radius=radius+radius
print(radius)
###Output
>>10
20
###Markdown
同时赋值var1, var2,var3... = exp1,exp2,exp3...
###Code
小明,小红,小兰="男","女","男女"
print(小明,小红,小兰)
height,width=eval(input(">>"))
area_=height*width
print(area_)
###Output
>>1,2
2
###Markdown
定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /、//、**
###Code
10//10#整除
10**2#平方
10/10#除
12%5#取余
###Output
_____no_output_____
###Markdown
运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
int(25//4)
num=eval(input(">>"))
if num % 2 == 0 :
print ("此数为"+str(num)+"偶数")
else :
print ("此数为"+str(num)+"奇数")
times=eval(input("输入一个秒数"))
mins=times//60
times_=times%60
print(mins,times_)
weekend=eval(input(">>"))
res=(weekend+10)%7
print(str(weekend)+"在十天之后是"+str(res))
###Output
>>10
10在十天之后是6
###Markdown
科学计数法- 1.234e+2- 1.234e-2
###Code
1.234e+2
1.234e-2
###Output
_____no_output_____
###Markdown
计算表达式和运算优先级
###Code
x=10
y=6
a=0
b=1
c=1
x,y,a,b,c=10,6,0,1,1
sum1=(3+4*x)/5
sum2=(10*(y-5)*(a+b+c))/x
sum3=9*((4/x+(9+x)/y))
res=sum1-sum2+sum3
print(res)
###Output
38.699999999999996
###Markdown
增强型赋值运算
###Code
a=1
a+=1
###Output
_____no_output_____
###Markdown
类型转换- float -> int- 四舍五入 round
###Code
round(3.1415926,2) # 四舍五入,保存两位小数
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
round((6e-4)*(197.55e+2),2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)
###Code
贷款数,月利率,年限=eval(input("money,month rate,years"))
月供=(贷款数*月利率)/(1-(1/(1+月利率)**(年限*12)))
总还款数=月供*年限*12
print(总还款数)
###Output
money,month rate,years10000,0.12,5
72080.3036429309
###Markdown
Homework- 1
###Code
celsius=eval(input("摄氏温度为"))
fahrenheit=(9/5)*celsius+32
print("华氏温度为"+str(fahrenheit))
###Output
摄氏温度为43
华氏温度为109.4
###Markdown
- 2
###Code
radius,length=eval(input("半径,长"))
area=radius*radius*3.141592
volume=area*length
print("底面积为"+str(round(area,4)))
print("体积为"+str(round(volume,1)))
###Output
半径,长5.5,12
底面积为95.0332
体积为1140.4
###Markdown
- 3
###Code
feet=eval(input("英尺数"))
meters=0.305*feet
print(str(meters)+"米")
###Output
英尺数16.5
5.0325米
###Markdown
- 4
###Code
M=eval(input("水量"))
initialTemperature=eval(input("初始温度"))
finalTemperature=eval(input("最终温度"))
Q=M*(finalTemperature-initialTemperature)*4184
print("能量为"+str(Q))
###Output
水量55.5
初始温度3.5
最终温度10.5
能量为1625484.0
###Markdown
- 5
###Code
差额,年利率=eval(input("balance,year rate"))
利息=差额*(年利率/1200)
print(round(利息,5))
###Output
balance,year rate1000,3.5
2.91667
###Markdown
- 6
###Code
v0,v1,t=eval(input("初始速度,末速度,时间"))
a=(v1-v0)/t
print("加速度为"+str(round(a,4)))
###Output
初始速度,末速度,时间5.5,50.9,4.5
加速度为10.0889
###Markdown
- 7 进阶
###Code
money=eval(input("存入金额"))
money1=money*(1+0.00417)
money2=(money+money1)*(1+0.00417)
money3=(money+money2)*(1+0.00417)
money4=(money+money3)*(1+0.00417)
money5=(money+money4)*(1+0.00417)
money6=(money+money5)*(1+0.00417)
print(round(money6,2))
###Output
存入金额100
608.82
###Markdown
- 8 进阶
###Code
num=eval(input("数字"))
num1=num%10
num2=num//100
num3=num//10%10
num4=num1+num2+num3
print(num4)
###Output
数字999
27
###Markdown
基本程序设计- 一切代码输入,请使用英文输入法
###Code
print('hello world')
###Output
hello world
###Markdown
编写一个简单的程序- 圆公式面积: area = radius \* radius \* 3.1415
###Code
radius=2
radius='100'
#area = radius * radius * 3.1415
#print(area)
print(radius)
###Output
100
###Markdown
在Python里面不需要定义数据的类型 控制台的读取与输入- input 输入进去的是字符串- eval
###Code
radius = int(input('请输入一个半径'))
area = radius * radius * 3.1415
print(area)
radius = eval(input('请输入一个边'))
area = radius * radius
print(area)
radius1 = int(input('请输入一个长'))
radius2 = int(input('请输入一个宽'))
area = radius1 * radius2
print(area)
import os
input_ = input('今天')
os.system('calc')
###Output
_____no_output_____
###Markdown
- 在jupyter用shift + tab 键可以跳出解释文档 变量命名的规范- 由字母、数字、下划线构成- 不能以数字开头 \*- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)- 可以是任意长度- 驼峰式命名 变量、赋值语句和赋值表达式- 变量: 通俗理解为可以变化的量- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式- test = test + 1 \* 变量在赋值之前必须有值 同时赋值var1, var2,var3... = exp1,exp2,exp3...
###Code
a,b,c = '10','20','30'
print(a,b,c)
heigth,width = eval(input('>>'))
area = heigth * width
print(area)
###Output
>>10,50
500
###Markdown
定义常量- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的 数值数据类型和运算符- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次 运算符 /(除)、//(整除)、**(求幂) 运算符 % EP:- 25/4 多少,如果要将其转变为整数该怎么改写- 输入一个数字判断是奇数还是偶数- 进阶: 输入一个秒数,写一个程序将其转换成分和秒:例如500秒等于8分20秒- 进阶: 如果今天是星期六,那么10天以后是星期几? 提示:每个星期的第0天是星期天
###Code
int(25/4)
shu = eval(input('>>'))
if shu % 2==0:
print(str(shu)+'是一个偶数')
else :
print(str(shu)+'是一个奇数')
second = eval(input('>>'))
a = second // 60
b =second % 60
print(str(a)+'分'+str(b)+'秒')
week = eval(input('>>'))
res = (week+10) % 7
print('10天以后是星期'+str(res))
###Output
>>6
10天以后是星期2
###Markdown
科学计数法- 1.234e+2- 1.234e-2
###Code
1.234e+2
###Output
_____no_output_____
###Markdown
计算表达式和运算优先级 分开写
###Code
x = 10
y = 6
a = 0
b = 1
c = 1
sss = (3+4*x)/5
sss1 = 10*(y-5)*(a+b+c)/x
sss2 = 9*(4/x+(9+x)/y)
ss = sss - sss1 + sss2
print(ss)
###Output
38.699999999999996
###Markdown
增强型赋值运算 类型转换- float -> int- 四舍五入 round
###Code
round(3.5)        # 只有一个参数时,四舍五入到整数
round(3.1415,2)   # 第一个是数,第二个为保留位数
###Output
_____no_output_____
###Markdown
EP:- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)- 必须使用科学计数法
###Code
a = (197.55e+2) * (6e-4)
round(a,2)
###Output
_____no_output_____
###Markdown
Project- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment) Homework- 1
###Code
Celsius = eval(input('Enter a degree in Celsius:'))
fahrenheit = (9 / 5) * Celsius + 32
print(str(Celsius)+' Celsius is '+str(fahrenheit)+' Fahrenheit')
###Output
Enter a degree in Celsius:43
43 Celsius is 109.4 Fahrenheit
###Markdown
- 2
###Code
import math
radius,length = eval(input('Enter the radius and length of a cylinder :'))
area = radius * radius * math.pi
volume = area * length
print('The area is '+ str(area))
print('The volume is '+ str(volume))
###Output
Enter the radius and length of a cylinder :5.5,12
The area is 95.03317777109125
The volume is 1140.398133253095
###Markdown
- 3
###Code
feet = eval(input('Enter a value for feet :'))
meters = feet * 0.305
print(str(feet) + ' feet is ' + str(meters) + ' meters')
###Output
Enter a value for feet :16.5
16.5 feet is 5.0325 meters
###Markdown
- 4
###Code
fil = eval(input('Enter the amount of water in kilograms :'))
ini = eval(input('Enter the initial temperature :'))
fin = eval(input('Enter the final temperature:'))
Q = fil * (fin - ini) * 4184
print('The energy needed is ' + str(Q))
###Output
Enter the amount of water in kilograms :55.5
Enter the initial temperature :3.5
Enter the final temperature:10.5
The energy needed is 1625484.0
###Markdown
- 5
###Code
blance,rate = eval(input('Enter blance and interest rate (e.g., 3 for 3%):'))
interest = blance * (rate / 1200)
print('The interest is ' + str(interest))
###Output
Enter blance and interest rate (e.g., 3 for 3%):1000,3.5
The interest is 2.916666666666667
###Markdown
- 6
###Code
v0,v1,t = eval(input('Enter v0,v1, and t :'))
a = (v1 - v0) / t
print('The average acceleration is ' + str(a))
###Output
Enter v0,v1, and t :5.5,50.9,4.5
The average acceleration is 10.088888888888889
###Markdown
- 7 进阶
###Code
b = 1 + 0.00417
money = eval(input('Enter the moonthly saving amount :'))
a = money * b
a1 = (money + a) * b
a2 = (money + a1) * b
a3 = (money + a2)* b
a4 = (money + a3) * b
a5 = (money + a4) *b
# aaa = a + a1 + a2 + a3 + a4 + a5
print('After the sixth month,the account value is '+ str(a5))
###Output
Enter the moonthly saving amount :100
After the sixth month,the account value is 608.8181155768638
###Markdown
- 8 进阶
###Code
number = eval(input('Enter a number between 0 and 1000 :'))
a = number % 10
b = number // 10
c = b % 10
d = b // 10
e = a + c + d
# print(a)
# print(b)
# print(c)
# print(d)
print('The sum of the digits is ' + str(e))
###Output
Enter a number between 0 and 1000 :999
The sum of the digits is 27
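###Markdown
Project sketch (added, not part of the original homework): a minimal loan-calculator outline for the project stated above. It assumes the user also enters the number of months, and the variable names are only illustrative.
###Code
# Hypothetical sketch: total repayment = monthly payment * number of months
monthlyPayment = eval(input('Enter the monthly payment :'))
numberOfMonths = eval(input('Enter the number of months :'))
totalPayment = monthlyPayment * numberOfMonths
print('The total payment is ' + str(totalPayment))
###Output
_____no_output_____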
|
Lec04_Eager execution/Lec04_Custom training basics.ipynb | ###Markdown
CS 20 : TensorFlow for Deep Learning Research Lecture 04 : Eager execution Custom training basics* Reference + https://www.tensorflow.org/tutorials/eager/custom_training?hl=ko Setup
###Code
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.enable_eager_execution()
###Output
_____no_output_____
###Markdown
VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:
###Code
# Using python state
x = tf.zeros([10, 10])
x += 2 # This is equivalent to x = x + 2, which does not mutate the original
# value of x
print(x)
###Output
tf.Tensor(
[[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
[2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]], shape=(10, 10), dtype=float32)
###Markdown
***TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state.*** To represent weights in a model, for example, it's often convenient and efficient ***to use TensorFlow variables.*** ***Computations using Variables are automatically traced when computing gradients***. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient. Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable.
###Code
v = tf.Variable(1.0)
print(v)
# Re-assign the value
v.assign(3.0)
print(v)
# Use `v` in a TensorFlow operation like tf.square() and reassign
v.assign(tf.square(v))
print(v.numpy)  # note: without parentheses this prints the bound method; call v.numpy() to get the value
###Output
<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>
<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=3.0>
<bound method ResourceVariable.numpy of <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=9.0>>
###Markdown
Example: Fitting a linear model1. Define the model2. Define a loss function3. Obtain training data4. Run through the training data and use "optimizer" to adjust the variables to fit the data define model
###Code
class Model():
def __init__(self):
self.w = tf.Variable(tf.random_normal(shape = []))
self.b = tf.Variable(0.)
def __call__(self, x):
return self.w * x + self.b
model = Model()
###Output
_____no_output_____
###Markdown
Define a loss function
###Code
def loss_fn(predicted_y, desired_y):
return tf.reduce_mean(tf.square(predicted_y - desired_y))
# Obtain training data
true_w = 3.0
true_b = 2.0
num_examples = 1000
inputs = tf.random_normal(shape=[num_examples])
noise = tf.random_normal(shape=[num_examples])
outputs = inputs * true_w + true_b + noise
plt.scatter(inputs, outputs, c='b')
plt.scatter(inputs, model(inputs), c='r')
plt.show()
print('Current loss: '),
print(loss_fn(model(inputs), outputs).numpy())
###Output
_____no_output_____
###Markdown
Run through the training data and use "optimizer" to adjust the variables to fit the data
###Code
epochs = 10
batch_size = 64
learning_rate = .1
data = tf.data.Dataset.from_tensor_slices((inputs, outputs))
data = data.shuffle(500)
data = data.batch(batch_size = batch_size)
# When using tf.train, see the documentation (https://www.tensorflow.org/guide/eager)
w_hist = []
b_hist = []
for epoch in range(epochs):
avg_loss = 0
tr_step = 0
for mb_x, mb_y in data:
with tf.GradientTape() as tape:
mb_yhat = model(mb_x)
mb_loss = loss_fn(mb_yhat, mb_y)
dw, db = tape.gradient(target = mb_loss, sources = [model.w, model.b])
model.w.assign_sub(learning_rate * dw)
model.b.assign_sub(learning_rate * db)
tr_step += 1
avg_loss += mb_loss
else:
w_hist.append(model.w.numpy())
b_hist.append(model.b.numpy())
avg_loss /= tr_step
print('epoch: {:2}, w: {:.2f}, b: {:.2f}, mse_loss: {:.3f}'.format(epoch + 1, w_hist[-1],
b_hist[-1], avg_loss))
# Let's plot it all
plt.plot(range(epochs), w_hist, 'r',
range(epochs), b_hist, 'b')
plt.plot([true_w] * len(range(epochs)), 'r--',
[true_b] * len(range(epochs)), 'b--')
plt.legend(['w', 'b', 'true_w', 'true_b'])
plt.show()
###Output
_____no_output_____ |
Pymaceuticals/pymaceuticals_HY.ipynb | ###Markdown
Observations about the results of the study. Main drugs of interest were: "Capomulin", "Infubinol", "Ketapril", "Placebo"1. In reviewing "Tumor Response to Treatment" and "Tumor Change Over 45-Day Treatment": Capomulin showed response to treatment, with a 19% reduction over 45 days of treatment. Other drugs and the placebo group did not show signs of tumor reduction (except Ramicane, see note below).2. In reviewing "Metastatic Spread During Treatment": the Placebo and Ketapril groups had the most spread. Capomulin (and Ramicane) had the least spread. 3. In reviewing "Survival During Treatment": Capomulin had the best results for survival. (Followed by Ramicane, not shown as a part of the assignment, see note below).NOTE: I also looked at all 10 drugs, and noticed the drug Ramicane. Ramicane showed response to treatment, with a 22% reduction over 45 days of treatment (a larger reduction than Capomulin). Ramicane also had less metastatic spread than Capomulin. Ramicane had the second highest survival rate, after Capomulin.
###Code
# Dependencies and Setup
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import sem
# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')
# File to Load (Remember to Change These)
mouse_drug_data_to_load = "data/mouse_drug_data.csv"
clinical_trial_data_to_load = "data/clinicaltrial_data.csv"
# Read the Mouse and Drug Data and the Clinical Trial Data
df_mouse_drug_data = pd.read_csv(mouse_drug_data_to_load)
df_clinical_trial_data = pd.read_csv(clinical_trial_data_to_load)
df_mouse_drug_data.describe()
# Combine the data into a single dataset
df_complete = pd.merge(df_clinical_trial_data, df_mouse_drug_data, how="left", on=["Mouse ID"])
# Display the data table for preview
df_complete.head()
###Output
_____no_output_____
###Markdown
Tumor Response to Treatment
###Code
# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint
mean_tumorvolume = df_complete.groupby(['Drug', 'Timepoint'])["Tumor Volume (mm3)"].mean()
# Convert to DataFrame
df_tumorvolume = pd.DataFrame({"Tumor Volume (mm3)": mean_tumorvolume})
df_tumorvolume = df_tumorvolume.reset_index()
# Preview DataFrame
df_tumorvolume
# Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint
SE_tumorvolume = df_complete.groupby(['Drug', 'Timepoint'])["Tumor Volume (mm3)"].agg(sem)
# Convert to DataFrame
df_tumorvolume_SE = pd.DataFrame({"Tumor Volume (mm3)": SE_tumorvolume})
df_tumorvolume_SE = df_tumorvolume_SE.reset_index()
# Preview DataFrame
df_tumorvolume_SE.head()
# Minor Data Munging to Re-Format the Data Frames
df_pivot_tumorvolume = df_tumorvolume.pivot (index = "Timepoint", columns = "Drug", values = "Tumor Volume (mm3)")
df_pivot_tumorvolume_SE = df_tumorvolume_SE.pivot(index = "Timepoint", columns = "Drug", values = "Tumor Volume (mm3)")
# Preview that Reformatting worked
df_pivot_tumorvolume.head()
###Output
_____no_output_____
###Markdown
Plotting - Choose Drugs to Plot
###Code
# Pick Drugs
## Drugs of interest
drugs_toplot = ["Capomulin", "Infubinol", "Ketapril", "Placebo"]
## Review all drugs
# drugs_toplot = list(df_pivot_tumorvolume.columns)
# Generate the Plot (with Error Bars)
# DFs to Plot
df_meandrugs_toplot = df_pivot_tumorvolume.loc[:, drugs_toplot]
df_semdrugs_toplot = df_pivot_tumorvolume_SE.loc[:, drugs_toplot]
#plot
ax = df_meandrugs_toplot.plot(figsize=(10,5),
color=['r', 'b', 'g', 'k', 'c', 'm', 'y', 'purple', 'olive', 'navy'],
yerr = df_semdrugs_toplot,
legend = False)
ax.set_prop_cycle(color=['r', 'b', 'g', 'k', 'c', 'm', 'y', 'purple', 'olive', 'navy'])
df_meandrugs_toplot.plot(figsize=(10,5),
style=['^-', 'o--', 'x-.', 'D-', '<-', 'v--', 'H-.', '>-', '+--', 'h-.'],
markersize=5, ax=ax)
ax.set_xlabel("Time (Days)")
ax.set_ylabel("Tumor Volume (mm3)")
plt.title("Tumor Response to Treatment", fontsize=14, fontweight="bold")
plt.grid(which = 'major', axis = "both")
ax.set_xlim(0, max(df_tumorvolume["Timepoint"])+5)
ax.set_ylim(0, max(df_tumorvolume["Tumor Volume (mm3)"])+10)
# Save the Figure
plt.tight_layout()
plt.savefig("ResponseToTreatment_HY.png")
# Show the chart
plt.show()
###Output
_____no_output_____
###Markdown
Metastatic Response to Treatment
###Code
# Store the Mean Met. Site Data Grouped by Drug and Timepoint
mean_metsite = df_complete.groupby(['Drug', 'Timepoint'])["Metastatic Sites"].mean()
# Convert to DataFrame
df_metsite = pd.DataFrame({"Metastatic Sites": mean_metsite})
df_metsite = df_metsite.reset_index()
# Preview DataFrame
df_metsite.head()
# Store the Standard Error associated with Met. Sites Grouped by Drug and Timepoint
SE_metsite = df_complete.groupby(['Drug', 'Timepoint'])["Metastatic Sites"].agg(sem)
# Convert to DataFrame
df_metsite_SE = pd.DataFrame({"Metastatic Sites": SE_metsite})
df_metsite_SE = df_metsite_SE.reset_index()
# Preview DataFrame
df_metsite_SE.head()
# Minor Data Munging to Re-Format the Data Frames
df_pivot_metsite = df_metsite.pivot (index = "Timepoint", columns = "Drug", values = "Metastatic Sites")
df_pivot_metsite_SE = df_metsite_SE.pivot(index = "Timepoint", columns = "Drug", values = "Metastatic Sites")
# Preview that Reformatting worked
df_pivot_metsite.head()
# Generate the Plot (with Error Bars)
# DFs to Plot
df_meanmetsite_toplot = df_pivot_metsite.loc[:, drugs_toplot]
df_semmetsite_toplot = df_pivot_metsite_SE.loc[:, drugs_toplot]
#plot
ax = df_meanmetsite_toplot.plot(figsize=(10,5),
color=['r', 'b', 'g', 'k', 'c', 'm', 'y', 'purple', 'olive', 'navy'],
yerr = df_semmetsite_toplot,
legend = False)
ax.set_prop_cycle( color=['r', 'b', 'g', 'k', 'c', 'm', 'y', 'purple', 'olive', 'navy'])
df_meanmetsite_toplot.plot(figsize=(10,5),
style=['^-', 'o--', 'x-.', 'D-', '<-', 'v--', 'H-.', '>-', '+--', 'h-.'],
markersize=5, ax=ax)
ax.set_xlabel("Treatment Duration (Days)")
ax.set_ylabel("Met. Sites")
plt.title("Metastatic Spread During Treatment", fontsize=14, fontweight="bold")
plt.grid(which = 'major', axis = "both")
ax.set_xlim(0, max(df_metsite["Timepoint"])+5)
ax.set_ylim(0, max(df_metsite["Metastatic Sites"])+2)
# Save the Figure
plt.tight_layout()
plt.savefig("MetastaticSpread_HY.png")
# Show the Figure
plt.show()
###Output
_____no_output_____
###Markdown
Survival Rates
###Code
# Store the Count of Mice Grouped by Drug and Timepoint (We can pass any metric)
count_mouse = df_complete.groupby(['Drug', 'Timepoint'])["Mouse ID"].count()
# Convert to DataFrame
df_mousecount = pd.DataFrame({"Mouse Count": count_mouse})
df_mousecount = df_mousecount.reset_index()
# Preview DataFrame
df_mousecount.head()
# Minor Data Munging to Re-Format the Data Frames
df_pivot_mousecount = df_mousecount.pivot (index = "Timepoint", columns = "Drug", values = "Mouse Count")
# Preview the Data Frame
df_pivot_mousecount
# Generate the Plot (Accounting for percentages)
#DF to Plot
df_pivot_percentmousecount = df_pivot_mousecount
df_pivot_mousecount.iloc[0]
df_pivot_percentmousecount = df_pivot_mousecount.apply(lambda x: (x / df_pivot_mousecount.iloc[0])*100, axis =1)
df_pivot_percentmousecount_toplot = df_pivot_percentmousecount.loc[:, drugs_toplot]
#plot
ax = df_pivot_percentmousecount_toplot.plot(figsize=(10,5),
color=['r', 'b', 'g', 'k', 'c', 'm', 'y', 'purple', 'olive', 'navy'],
legend = False)
ax.set_prop_cycle(color=['r', 'b', 'g', 'k', 'c', 'm', 'y', 'purple', 'olive', 'navy'])
df_pivot_percentmousecount_toplot.plot(figsize=(10,5),
style=['^-', 'o--', 'x-.', 'D-', '<-', 'v--', 'H-.', '>-', '+--', 'h-.'],
markersize=5, ax=ax)
ax.set_xlabel("Time (Days)")
ax.set_ylabel("Survival Rate (%)")
plt.title("Survival During Treatment", fontsize=14, fontweight="bold")
plt.grid(which = 'major', axis = "both")
ax.set_xlim(0, max(df_mousecount["Timepoint"])+5)
ax.set_ylim(0, 110)
# Save the Figure
plt.tight_layout()
plt.savefig("Survival_HY.png")
# Show the Figure
plt.show()
###Output
_____no_output_____
###Markdown
Summary Bar Graph
###Code
# Calculate the percent changes for each drug
df_summary = df_pivot_tumorvolume.T
days_max = max(df_complete["Timepoint"])
days_min = min(df_complete["Timepoint"])
df_summary ["percent change"] = ((df_summary[days_max] - df_summary[days_min]) / df_summary[days_min]) *100
# Display the data to confirm
# Store all Relevant Percent Changes into a Tuple
# The following line of code is for following instructions provided,
# but I'm not using the code for the rest of this exercise.
tup_percent_change = (df_summary ["percent change"])
tup_percent_change
# Splice the data between passing and failing drugs
df_summarydrugs_toplot = df_summary.loc[drugs_toplot, :]
df_summarydrugs_toplot = df_summarydrugs_toplot [["percent change"]]
df_summarydrugs_toplot = df_summarydrugs_toplot.sort_values("percent change")
df_summarydrugs_toplot = df_summarydrugs_toplot.reset_index()
passing_drugs = df_summarydrugs_toplot[df_summarydrugs_toplot["percent change"] < 0]
passing_drugs = passing_drugs.sort_values("percent change")
failing_drugs = df_summarydrugs_toplot[df_summarydrugs_toplot["percent change"] > 0]
failing_drugs = failing_drugs.sort_values("percent change")
# Orient widths. Add labels, tick marks, etc.
fig, ax = plt.subplots(figsize=(10,5))
x_ticks = np.arange (len(df_summarydrugs_toplot))
x_axis_pass = int(len(passing_drugs))
x_axis_fail = int(len(failing_drugs))
last_axis = x_axis_pass+x_axis_fail
x_axis_p = np.arange(x_axis_pass)
x_axis_f = np.arange(x_axis_pass, last_axis)
x_labels = df_summarydrugs_toplot["Drug"]
plt.xticks(x_ticks, x_labels)
plt.xlim(-0.75, len(x_ticks)-0.25)
plt.ylim(min(df_summarydrugs_toplot["percent change"])-10, max(df_summarydrugs_toplot["percent change"])+10)
passing_bar = ax.bar(x_axis_p, passing_drugs["percent change"], color='green', alpha = .5, width = .5)
failing_bar = ax.bar(x_axis_f, failing_drugs["percent change"], color='red', alpha = .5, width = .5)
plt.grid(which = 'major', axis = "both")
ax.set_xlabel("Drugs")
ax.set_ylabel("% Tumor Volume Change")
plt.title("Tumor Change Over 45 Day Treatment", fontsize=14, fontweight="bold")
# Use functions to label the percentages of changes
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height)+"%",
ha='center', va='bottom', color = "black")
# Call the labeling function on both bar groups
autolabel(passing_bar)
autolabel(failing_bar)
# Save the Figure
plt.tight_layout()
plt.savefig("TumorChangeBarHY.png")
# Show the Figure
plt.show()
###Output
_____no_output_____ |
ejercicios/3_Pandas/Ejercicios de Visualizacion con Pandas.ipynb | ###Markdown
Pandas data visualization exerciseThis is a short exercise to review the different plots that Pandas can generate for us. * NOTE: Use the **df3** file located in the **data** folder
###Code
import pandas as pd
import matplotlib.pyplot as plt
df3 = pd.read_csv('../data/df3')
%matplotlib inline
df3.info()
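# Added illustrative starter (not part of the original exercise): assuming df3 holds
# numeric columns, this generic call previews one of the plot types Pandas can generate.
df3.hist(figsize=(10, 6))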
df3.head()
###Output
_____no_output_____ |
COVID-19 testing.ipynb | ###Markdown
COVID-19 TestingHow useful are tests for determining who has a disease? It depends on both the test and how common the disease is in the population being tested.[This article](https://theconversation.com/coronavirus-surprisingly-big-problems-caused-by-small-errors-in-testing-136700) in the Conversation presents a great overview of the counterintuitive results that can occur when testing populations for diseases.When testing the general population, the problem is correctly identifying people who have a rare disease - the true positives can easily be swamped by the false positives. You will find more false positives than true positives.If we instead think about identifying people who are admitted to hospitals, the problem is reversed. Most people have the disease, so the false negatives will swamp the true negatives. You will find more false negatives than true negatives. Sensitivity and Specificity of a testThese two terms, sensitivity and specificity, have very particular meanings. - Sensitivity measures the proportion of true positives that are correctly identified (you have the disease, and the test correctly gives you a positive result)- Specificity measures the proportion of true negatives that are correctly identified (you don't have the disease, and the test correctly gives you a negative result)The Wikipedia article on this [topic](https://en.wikipedia.org/wiki/Sensitivity_and_specificity) is good, but dense. To use this notebookClick on "Cell" in the menu across the top, then click "Run All". You can then adjust the sliders near the bottom of the notebook by clicking on them and dragging them side-to-side.
###Code
import matplotlib.pyplot as plt
import ipywidgets
import locale
locale.setlocale(locale.LC_ALL, '') # Use '' for auto, or force e.g. to 'en_US.UTF-8'
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let's define a function that returns the number of true positives, false positives, true negatives, and false negatives for a given set of test parameters; a short added sanity check of the numbers follows the definition.
###Code
def test_results(n_tests, prop_pos, sensitivity, specificity):
TP = n_tests*prop_pos*sensitivity
FN = n_tests*prop_pos*(1-sensitivity)
TN = n_tests*(1-prop_pos)*specificity
FP = n_tests*(1-prop_pos)*(1-specificity)
return TP, FP, TN, FN
###Output
_____no_output_____
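###Markdown
A quick sanity check of `test_results` (added here as an illustration, using the same default numbers as the interactive plot below): with 3% prevalence and a reasonably accurate test, the positive predictive value — true positives as a fraction of all positive results — is well under 50%, which is the "swamped by false positives" effect described above.
###Code
# Illustrative example: 10,000 tests, 3% prevalence, sensitivity 0.938, specificity 0.956
TP, FP, TN, FN = test_results(n_tests=1e4, prop_pos=0.03, sensitivity=0.938, specificity=0.956)
print('True positives: {:.0f}, False positives: {:.0f}'.format(TP, FP))
print('Fraction of positive results that are true positives: {:.1%}'.format(TP / (TP + FP)))
###Output
_____no_output_____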
###Markdown
Now define a function to plot these results.
###Code
def plot_results(n_tests=1e4, prop_pos=0.03, sensitivity=0.938, specificity=0.956):
TP, FP, TN, FN = test_results(n_tests, prop_pos, sensitivity, specificity)
plt.figure(figsize=(10,10))
# population
plt.barh(0, n_tests, label='Total tests administered = {:,}'.format(n_tests))
plt.text(0.1*n_tests,0, 'Total tests administered {:,}'.format(n_tests), fontsize=20)
# population distribution
plt.barh(-1, n_tests*prop_pos, left=-n_tests*0.025,
label='Have disease = {:n}'.format(round(n_tests*prop_pos)))
if prop_pos >= 0.4:
plt.text(0, -1, 'Have disease \n{:,}'.format(round(n_tests*prop_pos)), fontsize=20)
plt.barh(-1, n_tests*(1-prop_pos), left=n_tests*prop_pos + n_tests*0.025,
label='Do not have disease = {:n}'.format(round(n_tests*(1-prop_pos))))
if prop_pos < 0.5:
plt.text(0.05*n_tests + n_tests*prop_pos, -1,
'Do not have disease \n{:,}'.format(round(n_tests*(1-prop_pos))), fontsize=20)
# test results
plt.barh(-2, TP, left=-0.075*n_tests, label='True positives = {:n}'.format(round(TP)))
plt.text(-0.1*n_tests + TP/2, -1.55, 'TP', fontsize=18)
plt.barh(-2, FN, left=TP-0.025*n_tests, label='False Negatives = {:n}'.format(round(FN)))
plt.text(TP-0.05*n_tests + FN/2, -1.55, 'FN', fontsize=18)
plt.barh(-2, TN, left=TP+FN + 0.025*n_tests, label='True Negatives = {:n}'.format(round(TN)))
plt.text(TP+FN + TN/2, -1.55, 'TN', fontsize=18)
plt.barh(-2, FP, left=TP+FN+TN + 0.075*n_tests, label='False Positives = {:n}'.format(round(FP)))
plt.text(TP+FN+TN+0.05*n_tests + FP/2, -1.55, 'FP', fontsize=18)
plt.legend(fontsize=20, bbox_to_anchor=(1.,0.5))
plt.text(0, -2.6,
'Percentage of positive results that are true positives = {:2.2f}%'.format(100*TP/(TP+FP)),
fontsize=18)
plt.text(0, -2.8,
'Percentage of negative results that are true negatives = {:2.2f}%'.format(100*TN/(TN+FN)),
fontsize=18)
plt.xlim(-0.1*n_tests, 1.15*n_tests)
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Remember these definitions:how many tests are administered: **n_tests**true proportion of population with the disease: **prop_pos****sensitivity** is the proportion of people with the disease who are correctly identified**specificity** is the proportion of people without the disease who are correctly identified
###Code
ipywidgets.interact(plot_results,
n_tests=(1000,100000),
prop_pos=(0.0,1,0.01),
sensitivity=(0.7,1,0.01),
specificity=(0.7,1,0.01)
);
###Output
_____no_output_____ |
intro-to-pytorch/.ipynb_checkpoints/Part 3 - Training Neural Networks (Exercises)-checkpoint.ipynb | ###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('/home/ubuntu/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
NoteIf you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
print(images.shape)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
torch.Size([64, 784])
tensor(2.3194, grad_fn=<NllLossBackward>)
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.2875, grad_fn=<NllLossBackward>)
###Markdown
AutogradNow that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.You can turn off gradients for a block of code with the `torch.no_grad()` content:```pythonx = torch.zeros(1, requires_grad=True)>>> with torch.no_grad():... y = x * 2>>> y.requires_gradFalse```Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
###Code
x = torch.randn(2,3, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
tensor([[3.1588, 0.0931, 4.3576],
[1.5281, 0.0154, 0.4664]], grad_fn=<PowBackward0>)
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
<PowBackward0 object at 0x7f328cd88198>
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
tensor(1.6032, grad_fn=<MeanBackward1>)
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
None
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{2x}{n}$$Since `x` here is a 2×3 tensor, $n = 6$ and the gradient works out to $x/3$, which is what the next cell prints for comparison.
###Code
z.backward()
print(x.grad)
print(x/3)
###Output
tensor([[ 0.5924, -0.1017, 0.6958],
[ 0.4121, -0.0413, 0.2276]])
tensor([[ 0.5924, -0.1017, 0.6958],
[ 0.4121, -0.0413, 0.2276]], grad_fn=<DivBackward0>)
###Markdown
These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd togetherWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
Before backward pass:
None
After backward pass:
tensor([[ 0.0013, 0.0013, 0.0013, ..., 0.0013, 0.0013, 0.0013],
[-0.0023, -0.0023, -0.0023, ..., -0.0023, -0.0023, -0.0023],
[ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],
...,
[-0.0025, -0.0025, -0.0025, ..., -0.0025, -0.0025, -0.0025],
[ 0.0027, 0.0027, 0.0027, ..., 0.0027, 0.0027, 0.0027],
[-0.0003, -0.0003, -0.0003, ..., -0.0003, -0.0003, -0.0003]])
###Markdown
Training the network!There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:* Make a forward pass through the network * Use the network output to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
Updated weights - Parameter containing:
tensor([[ 0.0136, -0.0027, -0.0282, ..., -0.0103, -0.0271, -0.0127],
[-0.0305, 0.0286, -0.0175, ..., 0.0222, -0.0055, 0.0203],
[-0.0319, -0.0101, -0.0116, ..., 0.0036, -0.0041, -0.0272],
...,
[-0.0322, 0.0074, 0.0265, ..., -0.0261, -0.0351, 0.0040],
[-0.0271, 0.0223, 0.0216, ..., -0.0319, 0.0175, -0.0177],
[-0.0165, -0.0011, 0.0105, ..., -0.0017, 0.0300, -0.0078]],
requires_grad=True)
###Markdown
Training for realNow we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll do a training pass where we calculate the loss, do a backwards pass, and update the weights.>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
optimizer.zero_grad()
# TODO: Training pass
logits = model(images)
loss = criterion(logits, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
Training loss: 2.3210457022002005
Training loss: 2.3210169770824374
Training loss: 2.321017864670581
Training loss: 2.3210270597990643
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
NoteIf you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
_____no_output_____
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
###Code
# TODO: Build a feed-forward network
model =
# TODO: Define the loss
criterion =
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
_____no_output_____
###Markdown
AutogradNow that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.You can turn off gradients for a block of code with the `torch.no_grad()` content:```pythonx = torch.zeros(1, requires_grad=True)>>> with torch.no_grad():... y = x * 2>>> y.requires_gradFalse```Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
_____no_output_____
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
_____no_output_____
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
_____no_output_____
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
_____no_output_____
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
_____no_output_____
###Markdown
These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd togetherWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
_____no_output_____
###Markdown
Training the network!There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:* Make a forward pass through the network * Use the network output to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
_____no_output_____
###Markdown
Training for realNow we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll do a training pass where we calculate the loss, do a backwards pass, and update the weights.>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
loss =
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
_____no_output_____
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Note: If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
_____no_output_____
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
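If the `dim` behaviour is unclear, here is a small sketch with a made-up batch of scores (separate from the exercise) showing that `dim=1` normalizes each row:

```python
import torch
import torch.nn.functional as F

scores = torch.randn(4, 10)             # made-up batch of 4 examples, 10 class scores each
probs = F.softmax(scores, dim=1)        # softmax across the columns
print(probs.sum(dim=1))                 # each row sums to 1

log_probs = F.log_softmax(scores, dim=1)
print(torch.exp(log_probs).sum(dim=1))  # exponentiating log-softmax also gives rows that sum to 1
```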
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
                      nn.ReLU(),
                      nn.Linear(128, 64),
                      nn.ReLU(),
                      nn.Linear(64, 10),
                      nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
_____no_output_____
###Markdown
Autograd: Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` context:

```python
x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False
```

Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
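A quick sketch of the global switch `torch.set_grad_enabled` mentioned above (throwaway tensors, purely illustrative):

```python
import torch

x = torch.ones(3, requires_grad=True)

torch.set_grad_enabled(False)   # turn gradient tracking off globally
y = x * 2
print(y.requires_grad)          # False

torch.set_grad_enabled(True)    # turn it back on
z = x * 2
print(z.requires_grad)          # True
```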
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
_____no_output_____
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
_____no_output_____
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
_____no_output_____
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
_____no_output_____
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
_____no_output_____
###Markdown
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd together: When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
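First, a quick self-contained check of that `requires_grad = True` claim, using a tiny throwaway layer rather than the notebook's model:

```python
from torch import nn

layer = nn.Linear(4, 2)   # a tiny layer, just for illustration
for name, param in layer.named_parameters():
    print(name, param.requires_grad)   # both weight and bias report True
```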
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
_____no_output_____
###Markdown
Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example, we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:

* Make a forward pass through the network
* Use the network output to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights

Below I'll go through one training step and print out the weights and gradients so you can see how they change. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
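To see that accumulation behaviour in isolation, here is a small sketch with a standalone tensor (not the network above):

```python
import torch

x = torch.tensor([1.0, 2.0], requires_grad=True)

# First backward pass
(x ** 2).sum().backward()
print(x.grad)      # tensor([2., 4.])

# Second backward pass without zeroing: the new gradients are added on top
(x ** 2).sum().backward()
print(x.grad)      # tensor([4., 8.])

# Zeroing clears the accumulated gradients before the next pass
x.grad.zero_()
print(x.grad)      # tensor([0., 0.])
```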
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
_____no_output_____
###Markdown
Training for real: Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll be doing a training pass where we calculate the loss, do a backwards pass, and update the weights.

>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
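For reference, the denominator in the loss printout is the number of batches per epoch (a quick sketch assuming the `trainset` and `trainloader` defined above):

```python
print(len(trainset))     # 60000 images in the MNIST training set
print(len(trainloader))  # roughly 60000 / 64 = 938 batches per epoch
```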
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
_____no_output_____
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Note: If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3030, grad_fn=<NllLossBackward>)
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1)
)
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3159, grad_fn=<NllLossBackward>)
###Markdown
Autograd: Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` context:

```python
x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False
```

Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
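A short sketch of switching tracking on after creation with `requires_grad_` (throwaway tensors, just to illustrate the method mentioned above):

```python
import torch

a = torch.randn(2, 2)     # created without gradient tracking
print(a.requires_grad)    # False

a.requires_grad_(True)    # switch tracking on in place
print(a.requires_grad)    # True

b = (a * a).sum()
print(b.grad_fn)          # operations on a are now recorded
```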
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
tensor([[1.0752, 0.1403],
[0.1677, 0.4560]], grad_fn=<PowBackward0>)
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
<PowBackward0 object at 0x1213f38d0>
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
tensor(0.4598, grad_fn=<MeanBackward0>)
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
None
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
tensor([[-0.5185, -0.1873],
[-0.2048, 0.3376]])
tensor([[-0.5185, -0.1873],
[-0.2048, 0.3376]], grad_fn=<DivBackward0>)
###Markdown
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd together: When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
Before backward pass:
None
After backward pass:
tensor([[-1.1774e-03, -1.1774e-03, -1.1774e-03, ..., -1.1774e-03,
-1.1774e-03, -1.1774e-03],
[-3.3871e-04, -3.3871e-04, -3.3871e-04, ..., -3.3871e-04,
-3.3871e-04, -3.3871e-04],
[-3.8453e-04, -3.8453e-04, -3.8453e-04, ..., -3.8453e-04,
-3.8453e-04, -3.8453e-04],
...,
[ 7.7276e-06, 7.7276e-06, 7.7276e-06, ..., 7.7276e-06,
7.7276e-06, 7.7276e-06],
[ 0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,
0.0000e+00, 0.0000e+00],
[ 3.7773e-06, 3.7773e-06, 3.7773e-06, ..., 3.7773e-06,
3.7773e-06, 3.7773e-06]])
###Markdown
Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example, we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:

* Make a forward pass through the network
* Use the network output to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights

Below I'll go through one training step and print out the weights and gradients so you can see how they change. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
Updated weights - Parameter containing:
tensor([[ 0.0099, 0.0219, -0.0101, ..., 0.0049, 0.0029, -0.0195],
[ 0.0327, -0.0014, -0.0181, ..., 0.0306, 0.0214, -0.0304],
[-0.0289, -0.0281, -0.0014, ..., 0.0347, 0.0092, 0.0338],
...,
[-0.0257, 0.0212, -0.0151, ..., 0.0189, 0.0185, 0.0002],
[ 0.0032, -0.0106, 0.0280, ..., 0.0130, 0.0226, 0.0104],
[-0.0002, 0.0129, -0.0117, ..., -0.0088, 0.0100, 0.0084]],
requires_grad=True)
###Markdown
Training for real: Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll be doing a training pass where we calculate the loss, do a backwards pass, and update the weights.

>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
optimizer.zero_grad()
output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
Training loss: 1.9027180365407899
Training loss: 0.8229409093732265
Training loss: 0.5132966474778871
Training loss: 0.42179053117916276
Training loss: 0.37855648114355894
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[2].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
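As a sanity check of the statement that `nn.CrossEntropyLoss` combines `nn.LogSoftmax` and `nn.NLLLoss`, here is a small sketch with made-up scores showing that the two routes give the same value:

```python
import torch
from torch import nn
import torch.nn.functional as F

scores = torch.randn(5, 10)            # made-up logits for 5 examples, 10 classes
labels = torch.randint(0, 10, (5,))    # made-up integer class labels

loss_ce = nn.CrossEntropyLoss()(scores, labels)
loss_nll = nn.NLLLoss()(F.log_softmax(scores, dim=1), labels)

print(loss_ce, loss_nll)               # the two values match
```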
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Note: If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3046, grad_fn=<NllLossBackward>)
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3166, grad_fn=<NllLossBackward>)
###Markdown
Autograd: Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` context:

```python
x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False
```

Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
tensor([[2.7771, 6.5229],
[0.7056, 0.3139]], grad_fn=<PowBackward0>)
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
<PowBackward0 object at 0x7f5808da1438>
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
tensor(2.5799, grad_fn=<MeanBackward1>)
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
None
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
tensor([[ 0.8332, -1.2770],
[ 0.4200, -0.2801]])
tensor([[ 0.8332, -1.2770],
[ 0.4200, -0.2801]], grad_fn=<DivBackward0>)
###Markdown
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd together: When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
Before backward pass:
None
After backward pass:
tensor([[ 0.0023, 0.0023, 0.0023, ..., 0.0023, 0.0023, 0.0023],
[-0.0011, -0.0011, -0.0011, ..., -0.0011, -0.0011, -0.0011],
[-0.0019, -0.0019, -0.0019, ..., -0.0019, -0.0019, -0.0019],
...,
[ 0.0005, 0.0005, 0.0005, ..., 0.0005, 0.0005, 0.0005],
[ 0.0014, 0.0014, 0.0014, ..., 0.0014, 0.0014, 0.0014],
[ 0.0002, 0.0002, 0.0002, ..., 0.0002, 0.0002, 0.0002]])
###Markdown
Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example, we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:

* Make a forward pass through the network
* Use the network output to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights

Below I'll go through one training step and print out the weights and gradients so you can see how they change. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
Updated weights - Parameter containing:
tensor([[-0.0050, -0.0281, 0.0227, ..., 0.0036, 0.0039, -0.0168],
[-0.0160, -0.0302, -0.0266, ..., 0.0194, 0.0267, -0.0164],
[-0.0251, 0.0264, 0.0337, ..., -0.0353, -0.0251, -0.0178],
...,
[-0.0195, 0.0121, -0.0209, ..., -0.0184, 0.0333, -0.0076],
[ 0.0059, 0.0197, 0.0251, ..., -0.0004, 0.0301, -0.0323],
[-0.0169, 0.0250, -0.0192, ..., 0.0112, -0.0116, 0.0346]],
requires_grad=True)
###Markdown
Training for real: Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll be doing a training pass where we calculate the loss, do a backwards pass, and update the weights.

>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 30
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
optimizer.zero_grad()
        output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
Training loss: 1.8641564633800531
Training loss: 0.7986210553186026
Training loss: 0.5113726007436384
Training loss: 0.4259752773368028
Training loss: 0.38393260549698305
Training loss: 0.35763437035622625
Training loss: 0.3387354004945455
Training loss: 0.3239599961890722
Training loss: 0.3117030142451019
Training loss: 0.30104615069083823
Training loss: 0.29165933658478105
Training loss: 0.2828007918844091
Training loss: 0.27440730441036
Training loss: 0.2667788985187311
Training loss: 0.2592231780846617
Training loss: 0.25244119430560547
Training loss: 0.2451703878226819
Training loss: 0.23880820491039423
Training loss: 0.23278797513592853
Training loss: 0.22665597946404903
Training loss: 0.22100584023892245
Training loss: 0.21553390783700607
Training loss: 0.21050666390197365
Training loss: 0.20520550118628214
Training loss: 0.2002535045448778
Training loss: 0.195487417205215
Training loss: 0.19082262250247287
Training loss: 0.1863727698257483
Training loss: 0.18213368047560966
Training loss: 0.17816761635275666
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
Note: If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3212, grad_fn=<NllLossBackward>)
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
log_probability = model(images)
print(log_probability.shape)
print(labels.shape)
# Calculate the loss with the logits and the labels
loss = criterion(log_probability, labels)
print(loss)
###Output
torch.Size([64, 10])
torch.Size([64])
tensor(2.2920, grad_fn=<NllLossBackward>)
###Markdown
Autograd: Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` context:

```python
x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False
```

Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
tensor([[0.0050, 0.0085],
[1.1823, 1.5758]], grad_fn=<PowBackward0>)
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
<PowBackward0 object at 0x11e6ccbe0>
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
tensor(0.6929, grad_fn=<MeanBackward0>)
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
None
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
tensor([[0.0352, 0.0461],
[0.5437, 0.6277]])
tensor([[0.0352, 0.0461],
[0.5437, 0.6277]], grad_fn=<DivBackward0>)
###Markdown
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd together: When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
Before backward pass:
None
After backward pass:
tensor([[-1.8062e-03, -1.8062e-03, -1.8062e-03, ..., -1.8062e-03,
-1.8062e-03, -1.8062e-03],
[-1.2469e-03, -1.2469e-03, -1.2469e-03, ..., -1.2469e-03,
-1.2469e-03, -1.2469e-03],
[-4.6217e-04, -4.6217e-04, -4.6217e-04, ..., -4.6217e-04,
-4.6217e-04, -4.6217e-04],
...,
[-4.0790e-05, -4.0790e-05, -4.0790e-05, ..., -4.0790e-05,
-4.0790e-05, -4.0790e-05],
[ 2.4696e-03, 2.4696e-03, 2.4696e-03, ..., 2.4696e-03,
2.4696e-03, 2.4696e-03],
[ 1.1100e-03, 1.1100e-03, 1.1100e-03, ..., 1.1100e-03,
1.1100e-03, 1.1100e-03]])
###Markdown
Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example, we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:

* Make a forward pass through the network
* Use the network output to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights

Below I'll go through one training step and print out the weights and gradients so you can see how they change. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
Updated weights - Parameter containing:
tensor([[-0.0207, 0.0310, 0.0053, ..., -0.0335, -0.0025, -0.0348],
[ 0.0106, -0.0262, -0.0019, ..., -0.0139, 0.0001, -0.0249],
[ 0.0303, 0.0075, -0.0015, ..., -0.0257, 0.0068, 0.0310],
...,
[-0.0343, -0.0120, -0.0244, ..., 0.0106, 0.0103, 0.0286],
[-0.0261, -0.0051, 0.0035, ..., 0.0132, 0.0346, 0.0343],
[ 0.0351, -0.0272, 0.0218, ..., 0.0261, -0.0181, -0.0266]],
requires_grad=True)
###Markdown
Training for real: Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll be doing a training pass where we calculate the loss, do a backwards pass, and update the weights.

>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
optimizer.zero_grad()
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
logps = model(images)
loss = criterion(logps, labels)
loss.backward()
running_loss += loss.item()
optimizer.step()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
Training loss: 1.938276565151174
Training loss: 0.9034158332642716
Training loss: 0.5399593679286016
Training loss: 0.43678147674623585
Training loss: 0.39015633042560205
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
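As an illustrative aside (an addition, not part of the original notebook), the mean squared loss formula above can be checked with a few made-up numbers:

```python
import torch

y = torch.tensor([1.0, 0.0, 1.0])        # true labels y_i
y_hat = torch.tensor([0.9, 0.2, 0.7])    # predicted labels y_hat_i
n = y.shape[0]

mse = ((y - y_hat) ** 2).sum() / (2 * n)  # matches the formula above
print(mse)                                # tensor(0.0233)
```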
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
NoteIf you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3258, grad_fn=<NllLossBackward>)
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
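If the `dim` argument is confusing, here is a tiny illustrative check (an addition, not part of the exercise):

```python
import torch
import torch.nn.functional as F

t = torch.randn(2, 3)
print(F.softmax(t, dim=0).sum(dim=0))  # each column sums to 1
print(F.softmax(t, dim=1).sum(dim=1))  # each row sums to 1 -- what we want here
```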
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.2971, grad_fn=<NllLossBackward>)
###Markdown
AutogradNow that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.You can turn off gradients for a block of code with the `torch.no_grad()` context:```pythonx = torch.zeros(1, requires_grad=True)>>> with torch.no_grad():... y = x * 2>>> y.requires_gradFalse```Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
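For completeness, here is a small added illustration of the two toggles mentioned above (the tensor values are arbitrary):

```python
import torch

a = torch.randn(3)
a.requires_grad_(True)           # enable gradient tracking in place
print(a.requires_grad)           # True

torch.set_grad_enabled(False)    # turn autograd off globally
print((a * 2).requires_grad)     # False
torch.set_grad_enabled(True)     # and back on again
```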
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
tensor([[1.4862, 1.0739],
[0.1446, 0.1883]], grad_fn=<PowBackward0>)
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
<PowBackward0 object at 0x00000254FC0994E0>
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
# np.add([1.4862, 1.0739, 0.1446, 0.1883])
(1.4862+1.0739+0.1446+0.1883)/4
z = y.mean()
print(z)
###Output
tensor(0.7232, grad_fn=<MeanBackward0>)
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
print(y.grad)
###Output
None
None
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
tensor([[0.2509, 0.3455],
[1.2021, 0.3092]])
tensor([[0.2509, 0.3455],
[1.2021, 0.3092]], grad_fn=<DivBackward0>)
###Markdown
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd togetherWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
Before backward pass:
None
After backward pass:
tensor([[ 1.8489e-03, 1.8489e-03, 1.8489e-03, ..., 1.8489e-03,
1.8489e-03, 1.8489e-03],
[ 5.4776e-04, 5.4776e-04, 5.4776e-04, ..., 5.4776e-04,
5.4776e-04, 5.4776e-04],
[-1.9275e-04, -1.9275e-04, -1.9275e-04, ..., -1.9275e-04,
-1.9275e-04, -1.9275e-04],
...,
[-2.0732e-03, -2.0732e-03, -2.0732e-03, ..., -2.0732e-03,
-2.0732e-03, -2.0732e-03],
[-2.8996e-05, -2.8996e-05, -2.8996e-05, ..., -2.8996e-05,
-2.8996e-05, -2.8996e-05],
[-1.2210e-03, -1.2210e-03, -1.2210e-03, ..., -1.2210e-03,
-1.2210e-03, -1.2210e-03]])
###Markdown
Training the network!There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:* Make a forward pass through the network * Use the network output to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
Updated weights - Parameter containing:
tensor([[ 0.0325, -0.0216, -0.0313, ..., 0.0218, 0.0076, 0.0144],
[ 0.0234, 0.0155, -0.0228, ..., -0.0206, -0.0134, -0.0208],
[ 0.0195, 0.0148, 0.0339, ..., -0.0154, -0.0111, 0.0011],
...,
[-0.0051, 0.0004, -0.0161, ..., 0.0006, 0.0134, 0.0150],
[ 0.0351, 0.0339, -0.0262, ..., 0.0042, 0.0161, -0.0164],
[ 0.0332, -0.0067, -0.0164, ..., 0.0100, 0.0003, -0.0245]],
requires_grad=True)
###Markdown
Training for realNow we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll be doing a training pass where we calculate the loss, do a backwards pass, and update the weights.>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
Training loss: 1.852432868246839
Training loss: 0.8031121633454427
Training loss: 0.5059089643487544
Training loss: 0.42035464716872684
Training loss: 0.3796419174527563
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
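To make the update rule above concrete, here is a tiny numeric sketch added for illustration (the numbers and learning rate are made up):

```python
import torch

W = torch.tensor([0.5, -0.3])
grad = torch.tensor([0.2, -0.1])   # pretend this is the gradient of the loss w.r.t. W
alpha = 0.1                        # learning rate

W_new = W - alpha * grad
print(W_new)                       # tensor([ 0.4800, -0.2900])
```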
###Code
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
NoteIf you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.3009, grad_fn=<NllLossBackward>)
###Markdown
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
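As a small added illustration of recovering probabilities from a log-softmax output (the input scores below are made up):

```python
import torch

log_p = torch.log_softmax(torch.tensor([[2.0, 1.0, 0.1]]), dim=1)
p = torch.exp(log_p)
print(p)          # class probabilities
print(p.sum())    # tensor(1.) -- each row sums to one
```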
###Code
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
###Output
tensor(2.2881, grad_fn=<NllLossBackward>)
###Markdown
AutogradNow that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.You can turn off gradients for a block of code with the `torch.no_grad()` context:```pythonx = torch.zeros(1, requires_grad=True)>>> with torch.no_grad():... y = x * 2>>> y.requires_gradFalse```Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
###Code
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
###Output
tensor([[0.0880, 0.3466],
[0.1477, 1.4294]], grad_fn=<PowBackward0>)
###Markdown
Below we can see the operation that created `y`, a power operation `PowBackward0`.
###Code
## grad_fn shows the function that generated this variable
print(y.grad_fn)
###Output
<PowBackward0 object at 0x10f233d68>
###Markdown
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
###Code
z = y.mean()
print(z)
###Output
tensor(0.5029, grad_fn=<MeanBackward1>)
###Markdown
You can check the gradients for `x` and `y` but they are empty currently.
###Code
print(x.grad)
###Output
None
###Markdown
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
###Code
z.backward()
print(x.grad)
print(x/2)
###Output
tensor([[ 0.1483, -0.2944],
[-0.1921, 0.5978]])
tensor([[ 0.1483, -0.2944],
[-0.1921, 0.5978]], grad_fn=<DivBackward0>)
###Markdown
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd togetherWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
###Code
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
###Output
Before backward pass:
None
After backward pass:
tensor([[ 0.0011, 0.0011, 0.0011, ..., 0.0011, 0.0011, 0.0011],
[-0.0011, -0.0011, -0.0011, ..., -0.0011, -0.0011, -0.0011],
[-0.0009, -0.0009, -0.0009, ..., -0.0009, -0.0009, -0.0009],
...,
[ 0.0012, 0.0012, 0.0012, ..., 0.0012, 0.0012, 0.0012],
[ 0.0008, 0.0008, 0.0008, ..., 0.0008, 0.0008, 0.0008],
[-0.0017, -0.0017, -0.0017, ..., -0.0017, -0.0017, -0.0017]])
###Markdown
Training the network!There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
###Code
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:* Make a forward pass through the network * Use the network output to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
###Code
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
###Output
Updated weights - Parameter containing:
tensor([[ 0.0155, 0.0034, 0.0092, ..., 0.0135, 0.0122, -0.0158],
[ 0.0171, 0.0078, 0.0119, ..., -0.0357, 0.0229, -0.0157],
[-0.0028, 0.0230, -0.0214, ..., 0.0212, -0.0127, -0.0320],
...,
[ 0.0230, -0.0148, 0.0265, ..., 0.0061, 0.0153, 0.0094],
[ 0.0348, 0.0286, -0.0075, ..., -0.0280, 0.0209, 0.0197],
[ 0.0028, 0.0311, 0.0129, ..., 0.0111, -0.0023, -0.0089]],
requires_grad=True)
###Markdown
Training for realNow we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll be doing a training pass where we calculate the loss, do a backwards pass, and update the weights.>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
###Code
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
# Flatten MNIST images into a 784 long vector
images = images.view(images.shape[0], -1)
# TODO: Training pass
optimizer.zero_grad()
output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
###Output
Training loss: 1.8471984935721864
Training loss: 0.8021559004526911
Training loss: 0.5096026420243768
Training loss: 0.42276712503832287
Training loss: 0.38030247838258235
###Markdown
With the network trained, we can check out its predictions.
###Code
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
logps = model(img)
# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____ |
Predict_Profit_of_New_Startup.ipynb | ###Markdown
Hi! My name is **Gautam Kumar Jaiswal**. This is **my project "Startup's Profit Predictor"** for the **DSC HMRITM Mentee Program**, under the guidance of **Mr. Depender kumar soni**.
###Code
#imported required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
np.__version__
#loading data file
data = pd.read_csv('50_Startups.csv')
# display column dtypes and number of records/rows in the dataset
# also check whether there is any missing data
data.info()
data.describe()
features = data.iloc[:, :-1].values
label = data.iloc[:, -1].values  # Profit is the last column
features
#converting the categorical features to numerical features as
#sklearn works only with numpy array
#Instead of label enconding and then onehotencoding,
#newer version directly works with onehotencoding using ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
transformer = ColumnTransformer(transformers=[
("OneHot", # Just a name
OneHotEncoder(), # The transformer class
[3] # The column(s) to be applied on.
)
],
remainder='passthrough' # donot apply anything to the remaining columns
)
features = transformer.fit_transform(features.tolist())
features
#converting an object to normal array
features = features.astype(float)
features
plt.hist(features)
#sampling the dataset
#normally 20% dataset is used for testing and 80% is used for training --> test_size=0.2 means 20%
#Training set will be used to train the model
#Creating Training and Testing sets
# Testing set will be used to test our model
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(features,
label,
test_size=0.2,
random_state=0)
# Create our model using Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
DTR = DecisionTreeRegressor(max_depth=20, random_state=25)
DTR.fit(X_train,y_train)
#checking score of training as well as testing
print(DTR.predict(X_test))
print(DTR.score(X_train,y_train) * 100)
print(DTR.score(X_test,y_test) * 100)
plt.plot(X_train, y_train)
#Creating Model Using RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
for i in range(20, 100):
    RF = RandomForestRegressor(n_estimators=i, max_depth=10)  # use the loop variable as n_estimators
    RF.fit(X_train, y_train.ravel())
    RF.predict(X_test)
    print("n_estimators =", i)
    print("Training Score =", RF.score(X_train, y_train))
    print("Testing Score =", RF.score(X_test, y_test))
# Creating model using sklearn.linear_model LinearRegression
from sklearn.linear_model import LinearRegression
reg= LinearRegression()
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
reg.score(X_train, y_train)
reg.score(X_test, y_test)
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
# Difference between Actual Data and Own Predicted Data
df
# Final visualization
df.plot(kind="line")
df.plot(kind="bar")
plt.show()
###Output
_____no_output_____
###Markdown
Comparing the training and testing accuracy of the three models, I noticed that the Decision Tree Regressor predicted more accurately than the other two algorithms. Accuracy of DecisionTreeRegressor: Training set: 100%, Testing set: 97%. Accuracy of RandomForestRegressor: Training set: 98%, Testing set: 96%. Accuracy of LinearRegression: Training set: 95%, Testing set: 93%.
###Code
# Collect the train/test scores of the three models so they can be compared visually
training_score = [DTR.score(X_train, y_train), RF.score(X_train, y_train), reg.score(X_train, y_train)]
test_score = [DTR.score(X_test, y_test), RF.score(X_test, y_test), reg.score(X_test, y_test)]
plt.hist(training_score)
plt.hist(test_score, alpha=0.5)
plt.legend(["training", "testing"])
plt.show()
from sklearn import metrics
print('Mean Absolute Error:',
metrics.mean_absolute_error(y_test, y_pred))
###Output
Mean Absolute Error: 3.92901711165905e-11
|
09_Computer_Vision/coin_prediction.ipynb | ###Markdown
Package version- tensorflow==2.2.0rc3- numpy==1.18.2- matplotlib==3.2.1- image==1.5.30- google==2.0.3 Import Tensorflow
###Code
import tensorflow
tensorflow.__version__
###Output
_____no_output_____
###Markdown
**Load any image**
###Code
# skimage.data has a lot of sample images to play around with
from skimage import data
import matplotlib.pyplot as plt
image = data.coins()
plt.imshow(image, cmap='gray')
###Output
_____no_output_____
###Markdown
**Import utilities needed for convolution**
###Code
from scipy.signal import convolve2d
###Output
_____no_output_____
###Markdown
**Find the 5X5 Gaussian Blur kernel with sigma = 1.0 and convolve the above image with that kernel***Hint: You can create various Gaussian kernels at http://dev.theomader.com/gaussian-kernel-calculator/*
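Alternatively (this is an added sketch, not required by the exercise), a similar kernel can be generated programmatically. Note the values will be close to, but not identical to, the web calculator's table, since that tool integrates the Gaussian over each pixel:

```python
import numpy as np

def gaussian_kernel(size=5, sigma=1.0):
    ax = np.arange(size) - (size - 1) / 2.0
    g = np.exp(-(ax ** 2) / (2 * sigma ** 2))   # 1-D Gaussian samples
    k = np.outer(g, g)                          # separable -> outer product gives the 2-D kernel
    return k / k.sum()                          # normalize so the weights sum to 1

print(gaussian_kernel().round(6))
```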
###Code
import numpy as np
kernel = np.array([[0.003765,0.015019,0.023792,0.015019,0.003765],
[0.015019,0.059912,0.094907,0.059912,0.015019],
[0.023792,0.094907,0.150342,0.094907,0.023792],
[0.015019,0.059912,0.094907,0.059912,0.015019],
[0.003765,0.015019,0.023792,0.015019,0.003765]])
###Output
_____no_output_____
###Markdown
**Convolve the Gaussian kernel with the image using 'valid' convolution and show the result side by side**
###Code
from scipy.signal import convolve2d
blurred_image = convolve2d(image, kernel, mode = 'valid')
plt.imshow(blurred_image,cmap='gray')
plt.show()
plt.imshow(image,cmap='gray')
###Output
_____no_output_____
###Markdown
Build a CNN to classify 10 monkey species **Mounting Google Drive on to the Google Colab instance**
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
**Set the appropriate path for the dataset zip provided**
###Code
images_path = "/content/drive/My Drive/monkeys_dataset.zip"
###Output
_____no_output_____
###Markdown
**Extracting the dataset.zip to the present working directory**
###Code
from zipfile import ZipFile
with ZipFile(images_path, 'r') as zip:
zip.extractall()
###Output
_____no_output_____
###Markdown
*Check the list of files in the pwd(present working directory) by running command 'ls' and ensure 'dataset' folder has been generated*
###Code
!ls
###Output
classifier.h5 classifier_weights.h5 dataset drive sample_data
###Markdown
**Build a Sequential CNN classifier with input shape as 64X64 and using three sets of Convolutional + Pooling layers. You can additionally use Dropout in the fully connected layers. Make sure the final layer shape matches the number of classes**
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
# Initialising the CNN classifier
classifier = Sequential()
# Add a Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu', padding = 'same'))
# Add a Max Pooling layer of size 2X2
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Add another Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
classifier.add(Conv2D(32, (3, 3), activation = 'relu', padding = 'same'))
# Adding another pooling layer
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Add another Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
classifier.add(Conv2D(32, (3, 3), activation = 'relu', padding = 'valid'))
# Adding another pooling layer
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Flattening the layer before fully connected layers
classifier.add(Flatten())
# Adding a fully connected layer with 512 neurons
classifier.add(Dense(units = 512, activation = 'relu'))
# Adding dropout with probability 0.5
classifier.add(Dropout(0.5))
# Adding a fully connected layer with 128 neurons
classifier.add(Dense(units = 128, activation = 'relu'))
# The final output layer with 10 neurons to predict the categorical classifcation
classifier.add(Dense(units = 10, activation = 'softmax'))
###Output
_____no_output_____
###Markdown
**Compile the CNN classifier with Adam optimizer (default learning rate and other parameters) and Categorical Crossentropy as the loss function and Accuracy as the metric to monitor** *Optionally you can use an optimizer with a custom learning rate and pass it to the optimizer parameter of compile**Eg: tensorflow.keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)*
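For reference (an optional variant added here, not required by the exercise), compiling with an explicitly configured optimizer instead of the `'adam'` string shortcut could look like this:

```python
from tensorflow.keras.optimizers import Adam

# Explicit Adam configuration; the values below simply restate the defaults
custom_adam = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
classifier.compile(optimizer=custom_adam,
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
```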
###Code
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
###Output
_____no_output_____
###Markdown
**Use ImageDataGenerator to create test and training set data generators and use the fit_generator() function to train the model** *ImageDataGenerator is a powerful preprocessing utility to generate training and testing data with common data augmentation techniques. It can also be used to generate training data from images stored in hierarchical directory structures. For more options of ImageDataGenerator go to https://keras.io/preprocessing/image/*
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Create data generator for training data with data augmentation and normalizing all
# values by 255
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Setting training data generator's source directory
# Setting the target size to resize all the images to (64,64) as the model input layer expects 64X64 images
training_set = train_datagen.flow_from_directory('./dataset/train',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
# Setting testing data generator's source directory
test_set = test_datagen.flow_from_directory('./dataset/test',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
# There are 1098 training images and 272 test images in total
classifier.fit_generator(training_set,
steps_per_epoch = int(1098/32),
epochs = 1,
validation_data = test_set,
validation_steps = int(272/32))
###Output
Found 1098 images belonging to 10 classes.
Found 272 images belonging to 10 classes.
34/34 [==============================] - 38s 1s/step - loss: 2.2958 - accuracy: 0.1471 - val_loss: 2.2120 - val_accuracy: 0.2070
###Markdown
**save the model and its weights**
###Code
classifier.save('./classifier.h5')
classifier.save_weights('./classifier_weights.h5')
###Output
_____no_output_____
###Markdown
*Check the current directory if the weights have been saved*
###Code
!ls
###Output
classifier.h5 classifier_weights.h5 dataset drive sample_data
###Markdown
Testing the model **Load the pre-trained saved model and load the weights**
###Code
from tensorflow.keras.models import load_model
# Load the pre trained model from the HDF5 file saved previously
pretrained_model = load_model('./classifier.h5')
pretrained_model.load_weights('./classifier_weights.h5')
###Output
_____no_output_____
###Markdown
**Test the model on one single image from the test folders**
###Code
import cv2
test_image = cv2.imread('/content/dataset/test/n0/n000.jpg')
# Check if the size of the Image array is compatible with Keras model
print(test_image.shape)
# Reshape the image to 64x64x3
test_image = cv2.resize(test_image, (64, 64))
print(test_image.shape)
# If not compatible expand the dimensions to match with the Keras Input
test_image = np.expand_dims(test_image, axis = 0)
test_image = test_image / 255.0
#Check the size of the Image array again
print('After expand_dims: '+ str(test_image.shape))
#Predict the result of the test image
result = pretrained_model.predict(test_image)
# Check the indices Image Data Generator has allotted to each folder
classes_dict = training_set.class_indices
print(classes_dict)
# Creating a list of classes in test set for showing the result as the folder name
prediction_class = []
for class_name,index in classes_dict.items():
prediction_class.append(class_name)
print(result[0])
# Index of the class with maximum probability
predicted_index = np.argmax(result[0])
# Print the name of the class
print(prediction_class[predicted_index])
###Output
(331, 500, 3)
(64, 64, 3)
After expand_dims: (1, 64, 64, 3)
{'n0': 0, 'n1': 1, 'n2': 2, 'n3': 3, 'n4': 4, 'n5': 5, 'n6': 6, 'n7': 7, 'n8': 8, 'n9': 9}
[0.11890665 0.0843783 0.08120754 0.18248226 0.08211511 0.10593714
0.07660571 0.08192824 0.07960276 0.1068363 ]
n3
|
Model backlog/Inference/18-tweet-inference-distilbert-base-uncased-subtrac.ipynb | ###Markdown
Dependencies
###Code
from tweet_utility_scripts import *
from transformers import TFDistilBertModel, DistilBertConfig
from tokenizers import BertWordPieceTokenizer
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, GlobalMaxPooling1D, Concatenate, Subtract
###Output
_____no_output_____
###Markdown
Load data
###Code
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
###Output
Test samples: 3534
###Markdown
Model parameters
###Code
MAX_LEN = 128
question_size = 3
base_path = '/kaggle/input/qa-transformers/distilbert/'
base_model_path = base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5'
config_path = base_path + 'distilbert-base-uncased-distilled-squad-config.json'
tokenizer_path = base_path + 'bert-large-uncased-vocab.txt'
input_base_path = '/kaggle/input/18-tweet-train-distilbert-base-uncased-subtract/'
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
###Output
Models to predict:
/kaggle/input/18-tweet-train-distilbert-base-uncased-subtract/model.h5
###Markdown
Tokenizer
###Code
tokenizer = BertWordPieceTokenizer(tokenizer_path , lowercase=True)
###Output
_____no_output_____
###Markdown
Pre process
###Code
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
x_test = get_data_test(test, tokenizer, MAX_LEN)
###Output
_____no_output_____
###Markdown
Model
###Code
module_config = DistilBertConfig.from_pretrained(config_path, output_hidden_states=False)
def model_fn():
input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
attention_mask = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
token_type_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids')
base_model = TFDistilBertModel.from_pretrained(base_model_path, config=module_config, name="base_model")
sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids})
last_state = sequence_output[0]
x = GlobalAveragePooling1D()(last_state)
y_start = Dense(MAX_LEN, activation='softmax')(x)
y_end = Dense(MAX_LEN, activation='softmax')(x)
y_start = Subtract(name='y_start')([y_start, y_end])
y_end = Subtract(name='y_end')([y_end, y_start])
model = Model(inputs=[input_ids, attention_mask, token_type_ids], outputs=[y_start, y_end])
return model
###Output
_____no_output_____
###Markdown
Make predictions
###Code
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
test_end_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
for model_path in model_path_list:
print(model_path)
model = model_fn()
model.load_weights(model_path)
test_preds = model.predict(x_test)
test_start_preds += test_preds[0] / len(model_path_list)
test_end_preds += test_preds[1] / len(model_path_list)
###Output
/kaggle/input/18-tweet-train-distilbert-base-uncased-subtract/model.h5
###Markdown
Post process
###Code
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['text_len'] = test['text'].apply(lambda x : len(x))
test["end"].clip(0, test["text_len"], inplace=True)
test["start"].clip(0, test["end"], inplace=True)
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], question_size, tokenizer), axis=1)
test["selected_text"].fillna('', inplace=True)
###Output
_____no_output_____
###Markdown
Visualize predictions
###Code
display(test.head(10))
###Output
_____no_output_____
###Markdown
Test set predictions
###Code
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
###Output
_____no_output_____ |
notebooks/00_eda_ibex35.ipynb | ###Markdown
1. [Import and Ingest data in Zipline](Import-and-ingest-data-in-Zipline)1. [First Trading Algorithm](First-Trading-Algorithm) Import and ingest data in ZiplineInicio Import data from Yahoo Financials
###Code
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
import zipline
from yahoofinancials import YahooFinancials
import warnings
# Default working directory
# os.chdir("../data")
# Display maximum columns
pd.set_option('display.max_columns', None)
# Seaborn graphic style as default
plt.style.use('seaborn')
# Graphics default size
plt.rcParams['figure.figsize'] = [16, 9]
plt.rcParams['figure.dpi'] = 200
# Warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# Load IPython Magic
%load_ext watermark
%load_ext zipline
%watermark --iversions
###Output
numpy 1.14.2
zipline 1.4.0
pandas 0.22.0
###Markdown
We will download our stock series from Yahoo Financials in a Zipline-accepted format, namely OHLC, volume, dividend and split, following this [article](https://medium.com/inside-bux/introduction-to-backtesting-trading-strategies-da152be3e6e0).
###Code
def download_csv_data(ticker, start_date, end_date, freq, path):
yahoo_financials = YahooFinancials(ticker)
df = yahoo_financials.get_historical_price_data(start_date, end_date, freq)
df = pd.DataFrame(df[ticker]['prices']).drop(['date'], axis=1) \
.rename(columns={'formatted_date': 'date'}) \
.loc[:, ['date', 'open', 'high', 'low', 'close', 'volume']] \
.set_index('date')
df.index = pd.to_datetime(df.index)
df['dividend'] = 0
df['split'] = 1
# save data to csv for later ingestion
df.to_csv(path, header=True, index=True)
# plot the time series
df.close.plot(title='{} prices --- {}:{}'.format(ticker, start_date, end_date));
download_csv_data(ticker='REP.MC',
start_date='2000-01-03',
end_date='2020-07-31',
freq='daily',
path='../data/OHLCV/REP.csv')
###Output
_____no_output_____
###Markdown
Ingest custom data bundle from a csv file in Quantopian Zipline In a Zipline environment, we need to (i) create a bundle and (ii) ingest it. A bundle is an interface, which imports and reads data source and hands it over to Zipline in a determined format for processing and storage. The library is able to read data incrementally, and only holds a part of the data in its memory at any given time. Ingest is the process of reading data with the help of a bundle, and storing it in Zipline's format. Zipline comes ready with data downloaded from Quandl (the WIKI database). However, in mid-2018 the data was discontinued. Furthermore, Quandl dataset only covers US stocks. For that reason, we shall create our own bundle with IBEX35 stocks and ingest it manually. Ingesting custom data bundle from a csv file is complicated and is not well explained in the documentation. The following book [Trading Evolved by Andreas Clenow](https://www.amazon.es/Trading-Evolved-Anyone-Killer-Strategies/dp/109198378X/ref=sr_1_1?adgrpid=68481793911&dchild=1&gclid=EAIaIQobChMI5JqUgsiJ6wIVCLrtCh1eAwH-EAAYASAAEgIAEPD_BwE&hvadid=327706962187&hvdev=c&hvlocphy=1005499&hvnetw=g&hvqmt=e&hvrand=5186505841026467309&hvtargid=kwd-801233091056&hydadcr=1948_1738136&keywords=trading+evolved&qid=1596819455&sr=8-1&tag=hydes-21) (chapter 23: Importing your Data) provides guidance for the task. Create the bundleFirst, we need to write a Python script (renamed after the name of our bundle) located in the bundles folder of our Zipline library containing (i) a specific function signature, (ii) a generator function, which will iterate over our stocks, process the data and populate the DataFrames we need and (iii) a dividends function checking for dividends, if any. Second, we shall register the bundle with Zipline and create an extension.py file in the /.zipline folder. Ingest the bundleFinally, we ingest the bundle in our Jupyter Notebook (see below).
###Code
from zipline.utils.calendars import get_calendar
import pandas as pd
from collections import OrderedDict
import pytz
import glob
import os
from_file_path = "/home/isabel/Repos/ml_for_algo_trading/data/OHLCV/*.csv"
to_file_path = "/home/isabel/Repos/ml_for_algo_trading/data/OHLCV/"
start_date = "2000-01-03"
end_date = "2020-07-31"
for fname in glob.glob(from_file_path):
destination_file = to_file_path + os.path.basename(fname)
print("Adding missing dates to {} and copying to {}".format(os.path.basename(fname),destination_file))
df = pd.read_csv(fname, index_col=0, parse_dates=True, sep=",", header=0)
df.index.name = 'date'
# Ensure the df is indexed by UTC timestamps
df = df.set_index(df.index.to_datetime().tz_localize('UTC'))
# Get all expected trading sessions in this range and reindex.
sessions = get_calendar('XMAD').sessions_in_range(start_date, end_date)
df = df.reindex(sessions)
df.to_csv(destination_file)
print("Finished")
# ingest our custom bundle: ibex35_stock_data
!zipline ingest --bundle ibex35_stock_data
!zipline bundles
###Output
csvdir <no ingestions>
ibex35_stock_data 2020-08-07 16:40:30.271346
quandl <no ingestions>
quantopian-quandl <no ingestions>
###Markdown
Load data bundle We will now proceed with loading our bundle. For such purpose, the [video](https://subscription.packtpub.com/video/application_development/9781789951165) Machine Learning for Algorithmic Trading Bots in Python by Mustafa Qamar-ud-Din, has been of great help. Therefore, we will first import necessary libraries.In order to help Zipline find our custom bundle, we will additionally load extensions in the IPython magic. Check this [GitHub thread](https://github.com/quantopian/zipline/issues/1542) for more information.On how to proceed with this section, this other [GitHub thread](https://github.com/quantopian/zipline/issues/1579) might also be worth exploring.
###Code
from zipline.data.data_portal import DataPortal
from zipline.data import bundles
from zipline.utils.calendars import get_calendar
import os
from zipline.utils.run_algo import load_extensions
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
###Output
_____no_output_____
###Markdown
First, let's load our bundle
###Code
bundle_data = bundles.load('ibex35_stock_data')
print(type(bundle_data))
###Output
<class 'zipline.data.bundles.core.BundleData'>
###Markdown
Second, we initialise our DataPortal interface with the below mandatory arguments.We shall beforehand define our end_date argument and check for the first trading day.
###Code
end_date = pd.Timestamp("2020-07-31", tz="utc")
bundle_data.equity_daily_bar_reader.first_trading_day
data_por = DataPortal(
asset_finder=bundle_data.asset_finder,
trading_calendar=get_calendar("XMAD"),
first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day,
equity_daily_reader=bundle_data.equity_daily_bar_reader
)
###Output
_____no_output_____
###Markdown
Third, we will build our DataFrame for further analysis and feature preprocessing. For such purpose, we shall use a get_history_window method on the data portal interface.
###Code
REP = data_por.asset_finder.lookup_symbol(
'REP',
as_of_date=None
)
df = data_por.get_history_window(
assets=[REP],
end_dt=end_date,
bar_count=5226,
frequency='1d',
data_frequency='daily',
field='close'
)
df.tail()
###Output
_____no_output_____
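###Markdown
The bar_count of 5226 used above is a magic number; presumably it corresponds to the number of XMAD trading sessions between the bundle's first trading day and our end_date. The cell below is an illustrative sketch (an addition, not part of the original workflow) of how that count could be derived instead of hard-coded.
###Code
# Hedged sketch: derive the bar count from the trading calendar rather than hard-coding 5226
n_sessions = len(get_calendar("XMAD").sessions_in_range(
    bundle_data.equity_daily_bar_reader.first_trading_day,
    end_date,
))
print(n_sessions)
###Output
_____no_output_____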
###Markdown
We now index our date column and rename the stock column after the ticker's name.
###Code
df.index = pd.DatetimeIndex(df.index)
###Output
_____no_output_____
###Markdown
The name of the column, Equity(0 [REP]), is not a plain string but a 'zipline.assets._assets.Equity' object. When we attempt to rename it in place by assigning to an element of df.columns, a TypeError is raised (Index does not support mutable operations). As a workaround, we create an identical column keyed by the ticker string (grabbing the existing label via list(df.columns)[0]) and then drop the original column.
###Code
df['REP'] = df[list(df.columns)[0]]
df = df.drop(columns=[list(df.columns)[0]])
df.head()
###Output
_____no_output_____
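###Markdown
Since the frame has a single column at this point, an equivalent and slightly simpler alternative (shown only as a sketch) is to overwrite the column labels directly, or to call rename() with the existing label object as the key; rename() builds a new Index, so it does not run into the mutability error mentioned above.
###Code
# Alternative sketch (idempotent here, since the column is already named 'REP')
df.columns = ['REP']
# df = df.rename(columns={df.columns[0]: 'REP'})   # equally valid
df.head()
###Output
_____no_output_____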
###Markdown
First Trading Algorithm
###Code
%matplotlib inline
from zipline import run_algorithm
from zipline.api import order_target_percent, symbol
from datetime import datetime
import pytz
from matplotlib import pyplot as plt
def initialize(context):
context.stock = symbol('REP')
context.index_average_window = 100
def handle_data(context, data):
# request stock history
equities_hist = data.history(context.stock, "close", context.index_average_window, "1d")
# long if the price is above the moving average, else flat
if equities_hist[-1] > equities_hist.mean():
stock_weight = 1.0
else:
stock_weight = 0.0
# place order
order_target_percent(context.stock, stock_weight)
def analyze(context, perf):
"""plot backtest results"""
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(311)
ax.set_title('Strategy Results')
ax.semilogy(perf['portfolio_value'], linestyle='-',
label="Equity Curve", linewidth=3.0)
ax.legend()
ax.grid(False)
ax = fig.add_subplot(312)
ax.plot(perf['gross_leverage'],
label="Exposure", linestyle="-", linewidth=1.0)
ax.legend()
ax.grid(True)
ax = fig.add_subplot(313)
ax.plot(perf['returns'],
label="Returns", linestyle="-.", linewidth=1.0)
ax.legend()
ax.grid(True)
start_date = datetime(2000, 1, 3, tzinfo=pytz.UTC)
end_date = datetime(2020, 7, 31, tzinfo=pytz.UTC)
# params for zipline engine
results = run_algorithm(
start=start_date,
end=end_date,
initialize=initialize,
analyze=analyze,
handle_data=handle_data,
capital_base=10000,
data_frequency= 'daily', bundle='ibex35_stock_data'
)
###Output
_____no_output_____ |
demos/projectile-inline.ipynb | ###Markdown
Exercise: computing the distance at which a projectile will hit the ground**Author: Giovanni Pizzi, EPFL**In this exercise, you are given three parameters, defining the initial conditions at which a projectile is launched. In particular, you are given in input:- the height $h$ above the ground from which the projectile is launched- the two components (horizontal $v_x$ and vertical $v_y$) of the velocity, $\vec v = (v_x, v_y)$ at which the projectile is launched Task**Your task is to write a python function that, given these three parameters, computes the horizontal position $D$ at which the projectile will hit the ground.** How to test the resultsTo test your function, you can move the sliders below that determine the initial conditions of the projectile.A real-time visualization will show the correct solution for the problem (solid curve), where the launch point is marked by a black dot and the correct hitting point by a black cross.You will also see the result of your proposed solution as a large red circle. Finally, You can inspect possible errors of your function by opening the tab "Results of the validation of your function".
###Code
%matplotlib inline
import numpy as np
import pylab as pl
import tabulate
from ipywidgets import Label, Button, Output, FloatSlider, HBox, VBox, Layout, HTML, Accordion
from widget_code_input import WidgetCodeInput
from IPython.display import display
# Value of the vertical (downwards) acceleration
g = 9.81 # m/s^2
code_widget = WidgetCodeInput(
function_name="get_hit_coordinate",
function_parameters="vertical_position, horizontal_v, vertical_v, g={}".format(g),
docstring="""
A function to compute the hit coordinate of a projectile
on the ground, knowing the initial launch parameters.
:param vertical_position: launch vertical position [m]
:param horizontal_v: launch horizontal velocity [m/s]
:param vertical_v: launch vertical velocity [m/s]
(positive values means upward velocity)
:param g: the vertical (downwards) acceleration (default: Earth's gravity)
:return: the position at which the projectile will hit the ground [m]
""",
function_body="# Input here your solution\n# After changing the function, move one of the sliders to validate your function")
display(code_widget)
## The solution:
# import math
# return horizontal_v * (vertical_v + math.sqrt(vertical_v**2 + 2. * g * vertical_position)) / g
vertical_position_widget = FloatSlider(
value=6, min=0, max=10,
description="Vertical position [m]",
continuous_update=False,
style={'description_width': 'initial'}, layout=Layout(width='50%', min_width='350px'))
horizontal_v_widget = FloatSlider(
value=5, min=-10, max=10,
description="Horizontal velocity [m/s]",
continuous_update=False,
style={'description_width': 'initial'}, layout=Layout(width='50%', min_width='350px'))
vertical_v_widget = FloatSlider(
value=3, min=-10, max=10,
description="Vertical velocity [m/s]",
continuous_update=False,
style={'description_width': 'initial'}, layout=Layout(width='50%', min_width='350px'))
plot_box = Output()
input_box = VBox([vertical_position_widget, horizontal_v_widget, vertical_v_widget])
display(HBox([input_box, plot_box]))
check_function_output = Output()
check_accordion = Accordion(children=[check_function_output], selected_index=None)
check_accordion.set_title(0, 'Results of the validation of your function (click here to see them)')
display(check_accordion)
def trajectory(t, vertical_position, horizontal_v, vertical_v, g):
"""
Return the coordinates (x, y) at time t
"""
# We define the initial x coordinate to be zero
x0 = 0
x = x0 + horizontal_v * t
y = -0.5 * g* t**2 + vertical_v * t + vertical_position
return x, y
def hit_conditions(vertical_position, horizontal_v, vertical_v, g):
"""
Return (t, D), where t is the time at which the ground is hit, and D
is the distance at which the projectile hits the ground
"""
# We define the initial x coordinate to be zero
x0 = 0
# x = x0 + horizontal_v * t => t = (x-x0) / horizontal_v
# y = -0.5 * g* t**2 + vertical_v * t + vertical_position =>
#
# y == 0 =>
a = -0.5 * g
b = vertical_v
c = vertical_position
# the two solutions; I want the solution with positive t,
# that will in any case be t1, because
# t1 > t2 for any value of a, b, c (since a < 0)
t1 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2. * a)
#t2 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2. * a)
t = t1
D = x0 + horizontal_v * t
return t, D
def check_user_value():
# I don't catch exceptions so that the users can see the traceback
error_string = "YOUR FUNCTION DOES NOT SEEM RIGHT, PLEASE TRY TO FIX IT"
ok_string = "YOUR FUNCTION SEEMS TO BE CORRECT!! CONGRATULATIONS!"
test_table = []
last_exception = None
type_warning = False
check_function_output.clear_output(wait=True)
with check_function_output:
user_function = code_widget.get_function_object()
test_values_vpos = range(1,7)
test_values_vx = range(-2,3)
test_values_vy = range(-2,3)
for test_vpos in test_values_vpos:
for test_vx in test_values_vx:
for test_vy in test_values_vy:
correct_value = hit_conditions(vertical_position=test_vpos,
horizontal_v=test_vx,
vertical_v=test_vy,
g=g
)[1] # [1] because this gives D ([0] is instead t_hit)
try:
user_hit_position = user_function(
vertical_position=test_vpos,
horizontal_v=test_vx,
vertical_v=test_vy,
)
try:
error = abs(user_hit_position - correct_value)
except Exception:
type_warning = True
error = 1. # Large value so it triggers a failed test
except Exception as exc:
last_exception = exc
test_table.append([test_vpos, test_vx, test_vy, correct_value, "ERROR", False])
else:
if error > 1.e-8:
test_table.append([test_vpos, test_vx, test_vy, str(correct_value), str(user_hit_position), False])
else:
test_table.append([test_vpos, test_vx, test_vy, str(correct_value), str(user_hit_position), True])
num_tests = len(test_table)
num_passed_tests = len([test for test in test_table if test[5]])
failed_tests = [test[:-1] for test in test_table if not test[5]] # Keep only failed tests, and remove last column
MAX_FAILED_TESTS = 5
if num_passed_tests < num_tests:
html_table = HTML("<style>tbody tr:nth-child(odd) { background-color: #e2f7ff; } th { background-color: #94cae0; min-width: 100px; } td { font-family: monospace; } td, th { padding-right: 3px; padding-left: 3px; } </style>" +
tabulate.tabulate(
failed_tests[:MAX_FAILED_TESTS],
tablefmt='html',
headers=["vertical_position", "horizontal_v", "vertical_v", "Expected value", "Your value"]
))
if num_passed_tests < num_tests:
print("Your function does not seem correct; only {}/{} tests passed".format(num_passed_tests, num_tests))
print("Printing up to {} failed tests:".format(MAX_FAILED_TESTS))
display(html_table)
else:
print("Your function is correct! Very good! All {} tests passed".format(num_tests))
if type_warning:
print("WARNING! in at least one case, your function did not return a valid float number, please double check!".format(num_tests))
# Raise the last exception obtained
if last_exception is not None:
print("I obtained at least one exception")
raise last_exception from None
def get_user_value():
"""
This function returns the value computed by the user's
function for the current sliders' value, or None if there is an exception
"""
with check_function_output:
user_function = code_widget.get_function_object()
try:
user_hit_position = user_function(
vertical_position=vertical_position_widget.value,
horizontal_v=horizontal_v_widget.value,
vertical_v=vertical_v_widget.value,
)
except Exception as exc:
return None
return user_hit_position
def replot(vertical_position, horizontal_v, vertical_v):
#global the_figure, the_plot, g
global g
the_figure = pl.figure(figsize=(4,3))
the_plot = pl.subplot(1,1,1)
pl.xlabel("x [m]")
pl.xlabel("y [m]")
# Compute correct values
t_hit, D = hit_conditions(vertical_position, horizontal_v, vertical_v, g)
t_array = np.linspace(0,t_hit, 100)
x_array, y_array = trajectory(t_array, vertical_position, horizontal_v, vertical_v, g)
    # Plot correct curves and points
pl.plot([0], [vertical_position], 'ok')
pl.plot([D], [0], 'xk')
pl.plot(x_array, y_array, '-b')
## (Try to) plot user value
user_value = None
try:
user_value = get_user_value()
except Exception:
# Just a guard not to break the visualization, we should not end up here
pass
try:
if user_value is not None:
the_plot.plot([user_value], [0], 'or')
except Exception:
# We might end up here if the function does not return a float value
pass
pl.axhline(0, color='gray')
# Set zoom to fixed value
the_plot.set_xlim([-30, 30])
the_plot.set_ylim([-1, 16])
# Redraw
pl.show()
def recompute(e):
global plot_box, g
if e is not None:
if e['type'] != 'change' or e['name'] not in ['value', 'function_body']:
return
plot_box.clear_output(wait=True)
with plot_box:
replot(
vertical_position=vertical_position_widget.value,
horizontal_v=horizontal_v_widget.value,
vertical_v=vertical_v_widget.value,
)
# Print info on the "correctness" of the user's function
check_user_value()
# Bind the sliders to the event
vertical_position_widget.observe(recompute)
horizontal_v_widget.observe(recompute)
vertical_v_widget.observe(recompute)
# Bind also the code widget
code_widget.observe(recompute)
# Perform the first recomputation (to create the plot)
_ = recompute(None)
###Output
_____no_output_____
notebooks/visual.ipynb | ###Markdown
Machine Learning Engineer Nanodegree Capstone ProjectQixiang Zhang Jul 3rd, 2018 Sensitivity Analysis
###Code
##### EXPLORE #########==================
# data exploring and basic libraries
import random
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from collections import deque as dq
# Pretty display for notebooks
from IPython.display import display # Allows the use of display() for DataFrames
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load the datasets
###Code
sensi = pd.read_csv("sensitivity_result.csv", index_col=0)
sensi
###Output
_____no_output_____
###Markdown
Compute the per-cuisine mean sensitivity and the per-model average accuracy, then visualize the sensitivity matrix as a heatmap.
###Code
sensi['Mean'] = sensi.mean(axis=1)
print('Model Accuracy')
sensi.mean(axis=0)
sensi = sensi.sort_values(by=['Mean'])
sensi_array = sensi.drop(['cuisine'],axis=1).values
fig, ax = plt.subplots(1, 1, figsize=(25, 12), tight_layout=0)
g = sns.heatmap(sensi_array, annot=True,
xticklabels=list(sensi.columns.values[1:]),
yticklabels=sensi['cuisine'],
robust=1, vmin=-0.7, vmax=2,
cmap='YlOrRd')
g.set_xticklabels(g.get_xticklabels(), rotation = 0, fontsize = 20)
g.set_yticklabels(g.get_yticklabels(), rotation = 0, fontsize = 20)
ax.set_title(label='Sensitivity Analysis', fontdict={'fontsize':35}, pad=10)
plt.show()
###Output
_____no_output_____
###Markdown
Model Evaluation and justification (sorted by Kaggle's score):

| **Model** | **Grid Search Validation Score** | **Kaggle's Accuracy Score after submission** |
|----|----|----|
| SVM (One-Vs-All) | 0.8082 | 0.81114 |
| Neural Network by Keras | 0.7975 | 0.79444 |
| Logistic Regression (Multinomial) | 0.7896 | 0.78660 |
| Logistic Regression (One-Vs-All) | 0.7958 | 0.78620 |
| Neural Network (MLP Classifier by sklearn) | 0.7811 | 0.78278 |
| Linear Classifiers with SGD | 0.7797 | 0.78057 |
| XGBoost | 0.7710 | 0.77896 |
| Random Forest (tuned) | 0.7595 | 0.75905 |
| LightGBM | never finished running | 0.74064 |
| Naive Bayes (Multinomial) | 0.7424 | 0.73793 |
| **Random Forest (benchmark)** | -- | **0.70002** |

Below, we visualize this comparison and then plot the distribution of recipes per cuisine in the training data.
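As a quick visual companion to the table, the sketch below simply re-enters the Kaggle scores from the table above and plots them (illustrative only):
###Code
# Kaggle accuracy scores, copied from the comparison table above
kaggle_scores = pd.Series({
    'SVM (One-Vs-All)': 0.81114,
    'Neural Network (Keras)': 0.79444,
    'Logistic Regression (Multinomial)': 0.78660,
    'Logistic Regression (One-Vs-All)': 0.78620,
    'Neural Network (MLPClassifier)': 0.78278,
    'Linear Classifiers with SGD': 0.78057,
    'XGBoost': 0.77896,
    'Random Forest (tuned)': 0.75905,
    'LightGBM': 0.74064,
    'Naive Bayes (Multinomial)': 0.73793,
    'Random Forest (benchmark)': 0.70002,
}).sort_values()
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
kaggle_scores.plot.barh(ax=ax, color='steelblue')
ax.set_xlabel('Kaggle accuracy score')
ax.set_title('Model comparison')
plt.show()
###Output
_____no_output_____
###Markdown
Next, the distribution of recipes per cuisine in the training data: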
###Code
rawdf_tr = pd.read_json(path_or_buf='raw_data/train.json')
sns.set(font_scale=1.5)
fig, ax = plt.subplots(1, 1, figsize=(25, 12), tight_layout=0)
ax = sns.countplot(y='cuisine', data=rawdf_tr, palette ='Set2')
ax.set_title(label='Recipe Distribution', fontdict={'fontsize':35}, pad=10)
###Output
_____no_output_____ |
tutorials/notebook/cx_site_chart_examples/barline_3.ipynb | ###Markdown
Example: CanvasXpress barline Chart No. 3This example page demonstrates how to use the Python package to create a chart that matches the CanvasXpress online example located at https://www.canvasxpress.org/examples/barline-3.html. This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function. Everything required for the chart to render is included in the code below; simply run the code block.
###Code
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="barline3",
data={
"z": {
"Annt1": [
"Desc:1",
"Desc:2",
"Desc:3",
"Desc:4"
],
"Annt2": [
"Desc:A",
"Desc:B",
"Desc:A",
"Desc:B"
],
"Annt3": [
"Desc:X",
"Desc:X",
"Desc:Y",
"Desc:Y"
],
"Annt4": [
5,
10,
15,
20
],
"Annt5": [
8,
16,
24,
32
],
"Annt6": [
10,
20,
30,
40
]
},
"x": {
"Factor1": [
"Lev:1",
"Lev:2",
"Lev:3",
"Lev:1",
"Lev:2",
"Lev:3"
],
"Factor2": [
"Lev:A",
"Lev:B",
"Lev:A",
"Lev:B",
"Lev:A",
"Lev:B"
],
"Factor3": [
"Lev:X",
"Lev:X",
"Lev:Y",
"Lev:Y",
"Lev:Z",
"Lev:Z"
],
"Factor4": [
5,
10,
15,
20,
25,
30
],
"Factor5": [
8,
16,
24,
32,
40,
48
],
"Factor6": [
10,
20,
30,
40,
50,
60
]
},
"y": {
"vars": [
"V1",
"V2",
"V3",
"V4"
],
"smps": [
"S1",
"S2",
"S3",
"S4",
"S5",
"S6"
],
"data": [
[
5,
10,
25,
40,
45,
50
],
[
95,
80,
75,
70,
55,
40
],
[
25,
30,
45,
60,
65,
70
],
[
55,
40,
35,
30,
15,
1
]
]
}
},
config={
"graphOrientation": "horizontal",
"graphType": "BarLine",
"legendColumns": 4,
"legendPosition": "bottom",
"lineThickness": 3,
"lineType": "spline",
"smpTitle": "Collection of Samples",
"smpTitleFontStyle": "italic",
"subtitle": "Random Data",
"theme": "CanvasXpress",
"title": "Bar-Line Graphs",
"xAxis": [
"V1",
"V2"
],
"xAxis2": [
"V3",
"V4"
]
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="barline_3.html")
###Output
_____no_output_____ |
01_Introduction/05_Declaring_Operations/05_operations.ipynb | ###Markdown
OperationsThis function introduces various operations in TensorFlowDeclaring Operations
###Code
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
###Output
_____no_output_____
###Markdown
Open graph session
###Code
sess = tf.Session()
###Output
_____no_output_____
###Markdown
Arithmetic OperationsTensorFlow has multiple types of arithmetic functions. Here we illustrate the differences between `div()`, `truediv()` and `floordiv()`. `div()`: integer division (similar to base Python `//`). `truediv()`: will convert integers to floats. `floordiv()`: the float result of `div()`.
###Code
print(sess.run(tf.div(3,4)))
print(sess.run(tf.truediv(3,4)))
print(sess.run(tf.floordiv(3.0,4.0)))
###Output
0
0.75
0.0
###Markdown
Mod function:
###Code
print(sess.run(tf.mod(22.0,5.0)))
###Output
2.0
###Markdown
Cross Product:
###Code
print(sess.run(tf.cross([1.,0.,0.],[0.,1.,0.])))
###Output
[ 0. 0. 1.]
###Markdown
Trig functionsSine, Cosine, and Tangent:
###Code
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))
###Output
-7.23998e-06
-1.0
1.0
###Markdown
Custom operationsHere we will create a polynomial function:`f(x) = 3 * x^2 - x + 10`
###Code
test_nums = range(15)
def custom_polynomial(x_val):
# Return 3x^2 - x + 10
return(tf.subtract(3 * tf.square(x_val), x_val) + 10)
print(sess.run(custom_polynomial(11)))
###Output
362
###Markdown
What should we get with list comprehension:
###Code
expected_output = [3*x*x-x+10 for x in test_nums]
print(expected_output)
###Output
[10, 12, 20, 34, 54, 80, 112, 150, 194, 244, 300, 362, 430, 504, 584]
###Markdown
TensorFlow custom function output:
###Code
for num in test_nums:
print(sess.run(custom_polynomial(num)))
###Output
10
12
20
34
54
80
112
150
194
244
300
362
430
504
584
###Markdown
OperationsThis function introduces various operations in TensorFlowDeclaring Operations
###Code
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
###Output
_____no_output_____
###Markdown
Open graph session
###Code
sess = tf.Session()
###Output
_____no_output_____
###Markdown
Arithmetic OperationsTensorFlow has multiple types of arithmetic functions. Here we illustrate the differences between `div()`, `truediv()` and `floordiv()`. `div()`: integer division (similar to base Python `//`). `truediv()`: will convert integers to floats. `floordiv()`: the float result of `div()`.
###Code
print(sess.run(tf.div(3,4)))
print(sess.run(tf.truediv(3,4)))
print(sess.run(tf.floordiv(3.0,4.0)))
###Output
0
0.75
0.0
###Markdown
Mod function:
###Code
print(sess.run(tf.mod(22.0,5.0)))
###Output
2.0
###Markdown
Cross Product:
###Code
print(sess.run(tf.cross([1.,0.,0.],[0.,1.,0.])))
###Output
[ 0. 0. 1.]
###Markdown
Trig functionsSine, Cosine, and Tangent:
###Code
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))
###Output
-7.23998e-06
-1.0
1.0
###Markdown
Custom operationsHere we will create a polynomial function:`f(x) = 3 * x^2 - x + 10`
###Code
test_nums = range(15)
def custom_polynomial(x_val):
# Return 3x^2 - x + 10
return(tf.sub(3 * tf.square(x_val), x_val) + 10)
print(sess.run(custom_polynomial(11)))
###Output
362
###Markdown
What should we get with list comprehension:
###Code
expected_output = [3*x*x-x+10 for x in test_nums]
print(expected_output)
###Output
[10, 12, 20, 34, 54, 80, 112, 150, 194, 244, 300, 362, 430, 504, 584]
###Markdown
TensorFlow custom function output:
###Code
for num in test_nums:
print(sess.run(custom_polynomial(num)))
###Output
10
12
20
34
54
80
112
150
194
244
300
362
430
504
584
###Markdown
OperationsThis function introduces various operations in TensorFlowDeclaring Operations
###Code
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
###Output
_____no_output_____
###Markdown
Arithmetic OperationsTensorFlow has multiple types of arithmetic functions. Here we illustrate `divide()` and `truediv()`. `tf.math.divide()`: performs true division (3/4 gives 0.75, as shown below). `truediv()`: will convert integers to floats and also returns 0.75.
###Code
print(tf.math.divide(3, 4))
print(tf.truediv(3,4))
###Output
0.75
tf.Tensor(0.75, shape=(), dtype=float64)
###Markdown
Mod function:
###Code
tf.math.mod(22.0, 5.0)
###Output
_____no_output_____
###Markdown
Cross Product:
###Code
tf.linalg.cross([1.,0.,0.], [0.,1.,0.])
###Output
_____no_output_____
###Markdown
Trig functionsSine, Cosine, and Tangent:
###Code
print(tf.sin(3.1416))
print(tf.cos(3.1416))
print(tf.math.divide(tf.sin(3.1416/4.), tf.cos(3.1416/4.)))
###Output
tf.Tensor(-7.2399803e-06, shape=(), dtype=float32)
tf.Tensor(-1.0, shape=(), dtype=float32)
tf.Tensor(1.0000036, shape=(), dtype=float32)
###Markdown
Custom operationsHere we will create a polynomial function:`f(x) = 3 * x^2 - x + 10`
###Code
test_nums = range(15)
def custom_polynomial(x_val):
# Return 3x^2 - x + 10
return(tf.subtract(3 * tf.square(x_val), x_val) + 10)
print(custom_polynomial(11))
###Output
tf.Tensor(362, shape=(), dtype=int32)
###Markdown
What should we get with list comprehension:
###Code
expected_output = [3*x*x-x+10 for x in test_nums]
print(expected_output)
###Output
[10, 12, 20, 34, 54, 80, 112, 150, 194, 244, 300, 362, 430, 504, 584]
###Markdown
TensorFlow custom function output:
###Code
for num in test_nums:
print(custom_polynomial(num))
###Output
tf.Tensor(10, shape=(), dtype=int32)
tf.Tensor(12, shape=(), dtype=int32)
tf.Tensor(20, shape=(), dtype=int32)
tf.Tensor(34, shape=(), dtype=int32)
tf.Tensor(54, shape=(), dtype=int32)
tf.Tensor(80, shape=(), dtype=int32)
tf.Tensor(112, shape=(), dtype=int32)
tf.Tensor(150, shape=(), dtype=int32)
tf.Tensor(194, shape=(), dtype=int32)
tf.Tensor(244, shape=(), dtype=int32)
tf.Tensor(300, shape=(), dtype=int32)
tf.Tensor(362, shape=(), dtype=int32)
tf.Tensor(430, shape=(), dtype=int32)
tf.Tensor(504, shape=(), dtype=int32)
tf.Tensor(584, shape=(), dtype=int32)
###Markdown
OperationsThis function introduces various operations in TensorFlowDeclaring Operations
###Code
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
###Output
/home/hduser/anaconda3/envs/myenv/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/home/hduser/anaconda3/envs/myenv/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/home/hduser/anaconda3/envs/myenv/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/home/hduser/anaconda3/envs/myenv/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/home/hduser/anaconda3/envs/myenv/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/home/hduser/anaconda3/envs/myenv/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Open graph session
###Code
sess = tf.Session()
###Output
_____no_output_____
###Markdown
Arithmetic OperationsTensorFlow has multiple types of arithmetic functions. Here we illustrate the differences between `div()`, `truediv()` and `floordiv()`. `div()`: integer division (similar to base Python `//`). `truediv()`: will convert integers to floats. `floordiv()`: the float result of `div()`.
###Code
print(sess.run(tf.div(3,4)))
print(sess.run(tf.truediv(3,4)))
print(sess.run(tf.floordiv(3.0,4.0)))
###Output
0
0.75
0.0
###Markdown
Mod function:
###Code
print(sess.run(tf.mod(22.0,5.0)))
###Output
2.0
###Markdown
Cross Product:
###Code
print(sess.run(tf.cross([1.,1.,1.],[0.,1.,0.])))
###Output
[-1. 0. 1.]
###Markdown
Trig functionsSine, Cosine, and Tangent:
###Code
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))
###Output
-7.2399803e-06
-1.0
1.0000036
###Markdown
Custom operationsHere we will create a polynomial function:`f(x) = 3 * x^2 - x + 10`
###Code
test_nums = range(15)
def custom_polynomial(x_val):
# Return 3x^2 - x + 10
return(tf.subtract(3 * tf.square(x_val), x_val) + 10)
print(sess.run(custom_polynomial(11)))
###Output
362
###Markdown
What should we get with list comprehension:
###Code
expected_output = [3*x*x-x+10 for x in test_nums]
print(expected_output)
###Output
[10, 12, 20, 34, 54, 80, 112, 150, 194, 244, 300, 362, 430, 504, 584]
###Markdown
TensorFlow custom function output:
###Code
for num in test_nums:
print(sess.run(custom_polynomial(num)))
###Output
10
12
20
34
54
80
112
150
194
244
300
362
430
504
584
###Markdown
OperationsThis function introduces various operations in TensorFlowDeclaring Operations
###Code
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
###Output
_____no_output_____
###Markdown
Open graph session
###Code
sess = tf.Session()
###Output
_____no_output_____
###Markdown
Arithmetic OperationsTensorFlow has multiple types of arithmetic functions. Here we illustrate the differences between `div()`, `truediv()` and `floordiv()`. `div()`: integer division (similar to base Python `//`). `truediv()`: will convert integers to floats. `floordiv()`: the float result of `div()`.
###Code
print(sess.run(tf.div(3,4)))
print(sess.run(tf.truediv(3,4)))
print(sess.run(tf.floordiv(3.0,4.0)))
###Output
0
0.75
0.0
###Markdown
Mod function:
###Code
print(sess.run(tf.mod(22.0,5.0)))
###Output
2.0
###Markdown
Cross Product:
###Code
print(sess.run(tf.cross([1.,0.,0.],[0.,1.,0.])))
###Output
[ 0. 0. 1.]
###Markdown
Trig functionsSine, Cosine, and Tangent:
###Code
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))
###Output
-7.23998e-06
-1.0
1.0
###Markdown
Custom operationsHere we will create a polynomial function:`f(x) = 3 * x^2 - x + 10`
###Code
test_nums = range(15)
def custom_polynomial(x_val):
# Return 3x^2 - x + 10
return(tf.subtract(3 * tf.square(x_val), x_val) + 10)
print(sess.run(custom_polynomial(11)))
###Output
362
###Markdown
What should we get with list comprehension:
###Code
expected_output = [3*x*x-x+10 for x in test_nums]
print(expected_output)
###Output
[10, 12, 20, 34, 54, 80, 112, 150, 194, 244, 300, 362, 430, 504, 584]
###Markdown
TensorFlow custom function output:
###Code
for num in test_nums:
print(sess.run(custom_polynomial(num)))
###Output
10
12
20
34
54
80
112
150
194
244
300
362
430
504
584
###Markdown
OperationsThis function introduces various operations in TensorFlowDeclaring Operations
###Code
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.framework import ops
ops.reset_default_graph()
###Output
WARNING:tensorflow:From /home/rong/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/compat/v2_compat.py:88: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
###Markdown
Open graph session
###Code
sess = tf.Session()
###Output
_____no_output_____
###Markdown
Arithmetic OperationsTensorFlow has multiple types of arithmetic functions. Here we illustrate the differences between `div()`, `truediv()` and `floordiv()`. `div()`: integer division (similar to base Python `//`). `truediv()`: will convert integers to floats. `floordiv()`: the float result of `div()`.
###Code
print(sess.run(tf.div(3,4)))
print(sess.run(tf.truediv(3,4)))
print(sess.run(tf.floordiv(3.0,4.0)))
###Output
WARNING:tensorflow:From <ipython-input-3-ab8850756d13>:1: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Deprecated in favor of operator or tf.math.divide.
0
0.75
0.0
###Markdown
Mod function:
###Code
print(sess.run(tf.mod(22.0,5.0)))
###Output
2.0
###Markdown
Cross Product:
###Code
print(sess.run(tf.cross([1.,0.,0.],[0.,1.,0.])))
###Output
[0. 0. 1.]
###Markdown
Trig functionsSine, Cosine, and Tangent:
###Code
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
print(sess.run(tf.div(tf.sin(3.1416/4.), tf.cos(3.1416/4.))))
###Output
-7.2399803e-06
-1.0
1.0000036
###Markdown
Custom operationsHere we will create a polynomial function:`f(x) = 3 * x^2 - x + 10`
###Code
test_nums = range(15)
def custom_polynomial(x_val):
# Return 3x^2 - x + 10
return(tf.subtract(3 * tf.square(x_val), x_val) + 10)
print(sess.run(custom_polynomial(11)))
###Output
362
###Markdown
What should we get with list comprehension:
###Code
expected_output = [3*x*x-x+10 for x in test_nums]
print(expected_output)
###Output
[10, 12, 20, 34, 54, 80, 112, 150, 194, 244, 300, 362, 430, 504, 584]
###Markdown
TensorFlow custom function output:
###Code
for num in test_nums:
print(sess.run(custom_polynomial(num)))
###Output
10
12
20
34
54
80
112
150
194
244
300
362
430
504
584
|
zer1dsra/ZerOneAssigment.ipynb | ###Markdown
ZerOne Recruitment Assignment By Omid Kayhani - Submitted on Jul. 11, 2020. This Jupyter notebook solves the problems and presents the results step by step.
###Code
# Importing the required packages
import pandas as pd
import numpy as np
import researchpy as rpy
from scipy.stats import linregress
###Output
_____no_output_____
###Markdown
Problem 1: The playing card gameThe problem can be solved in two ways: analytically or experimentally. The analytical approach did not seem straightforward, so the experimental approach was employed. In this approach, the deck is shuffled a number of times (the default is 10,000), the cards are drawn, and the points for each game are calculated. When the number of experiments is high enough, the results approximate the infinite-game behaviour that an analytical solution would describe. Let us first define a function that plays the games for a given number of cards and suits in the deck and returns a Pandas dataframe comprising all the games played.
###Code
def GamesGen(N, M, NoE=int(1e4)):
"""
This is a function that generates a pandas dataframe that logs the games played over the designated number of
experiments with the last column indicating the points acquired in that attempt.
The function accepts the following arguments:
N = number of cards in the deck
M = number of suits in the deck
NoE = number of experiments. Equals to 10000 if not specified.
The code will generate an error if the number of cards (N) is not divisible by the number of suits (M)
"""
rem = N % M
if rem != 0:
raise Exception("Cannot split the cards amongs the suits uniformly. Please indicate another combo.")
# Number of suit members
nm = int(N/M)
# Number of draws (equal to the number of cards in the deck)
draws = pd.DataFrame(columns=range(N+1)).drop([0],axis=1)
# making the deck of cards
deck = np.empty([1,draws.shape[1]], dtype=int)
# deck = pd.Series([])
for i in range(M):
x = np.empty([1,nm],dtype=int)[0,:]
x.fill(i+1)
deck[0,np.array(range(nm))+i*nm] = x
# Making a dataframe of all games played
for i in range(NoE):
experiment = np.random.permutation(deck.reshape(deck.size,1)).reshape(1,deck.size)
ex_df = pd.DataFrame(experiment, columns=draws.columns)
draws = pd.concat([draws, ex_df], axis=0, ignore_index=True)
# Incidents of comparing the drawn card's suit with the previous one's
incidents = pd.DataFrame(index=draws.index)
for i in range(N-1):
x = draws.iloc[:,i+1] - draws.iloc[:,i]
x = pd.DataFrame(x, columns=[str(i+1)])
incidents = pd.concat([incidents,x],axis=1)
# Games played and their acquired points
games = draws
games['P'] = (incidents[incidents.columns] == 0).sum(axis=1)
return games
###Output
_____no_output_____
###Markdown
Questions 1, 2, and 5We can now use the function for M=2 and N=26 and calculate the mean of possible acquired points as well as their standard deviation. The conditional probablity can also be computed at this point.
###Code
df1 = GamesGen(26,2)
df1.head()
print('The mean of points to get for N=26 and M=2 is', df1.P.mean(axis=0))
print('The standard deviation of points to get for N=26 and M=2 is', df1.P.std(axis=0))
print('The conditional probability of P>12 given P>6 is',round(df1[df1.P>12].shape[0]/df1[df1.P>6].shape[0]*100, 2))
###Output
The mean of points to get for N=26 and M=2 is 12.0257
The standard deviation of points to get for N=26 and M=2 is 2.511029676647326
The conditional probability of P>12 given P>6 is 42.48
###Markdown
Questions 3, 4, and 6This deck has twice as many cards and suits as the previous one, so we can infer that the mean number of points to be earned should be the same.
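As a quick sanity check of that claim (an illustrative derivation, assuming a uniformly random shuffle): for any pair of consecutive draws, the probability that the second card matches the suit of the first is $(N/M - 1)/(N - 1)$, and there are $N - 1$ such pairs, so by linearity of expectation $E[P] = (N-1)\cdot\frac{N/M-1}{N-1} = N/M - 1$. That is $26/2 - 1 = 12$ for the previous deck and $52/4 - 1 = 12$ here, consistent with the simulated means of roughly 12.03 in both experiments.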
###Code
df2 = GamesGen(52,4)
df2.head()
print('Frequency of different points acquired throughout the attempts:')
df2.P.value_counts()
print('The mean of points to get for N=52 and M=4 is', df2.P.mean(axis=0))
print('The standard deviation of points to get for N=52 and M=4 is', df2.P.std(axis=0))
print('The conditional probability of P>12 given P>6 is',round(df2[df2.P>12].shape[0]/df2[df2.P>6].shape[0]*100, 2))
###Output
The mean of points to get for N=52 and M=4 is 12.0361
The standard deviation of points to get for N=52 and M=4 is 3.0334002277468444
The conditional probability of P>12 given P>6 is 43.91
###Markdown
Problem 2: Traffic stopsThe datasets for this problem are the logs of stopped cars in the two states of Montana (MT) and Vermont (VT). The datasets include information regarding the profiles of stopped drivers and the corresponding outcome of each stop, such as an arrest or a warning. Let us first load the datasets.
###Code
# Reading the datasets
mt = pd.read_csv('MT-clean.csv', low_memory=False)
vt = pd.read_csv('VT-clean.csv',low_memory=False)
###Output
_____no_output_____
###Markdown
Meet and greet dataLet us first examine the datasets a little.
###Code
# The size of the dataset for Montana stops
mt.shape
# The size of the dataset for Vermont stops
vt.shape
# The attributes logged regarding each stop for Montana state
print(mt.columns)
# A preview of the Montana stops dataset
mt.head()
# The attributes logged regarding each stop for Vermont state
print(vt.columns)
# A preview of the Vermont stops dataset
vt.head()
print('Missing values for different attributes of MT stops:')
mt.isnull().sum()
print('Missing values for different attributes of VT stops:')
vt.isnull().sum()
###Output
Missing values for different attributes of VT stops:
###Markdown
There are missing values present for various attributes of the dataset. We will handle these missing values as needed for the questions to come. Question 1Proportion of stopped drivers in MT who are male.
###Code
mt.driver_gender.value_counts()
mt_male = mt[mt.driver_gender=='M']
# Proportion of male drivers among the stops with a recorded gender
print('Proportion of male stopped drivers in MT:', round(len(mt_male) / mt.driver_gender.notnull().sum(), 4))
###Output
_____no_output_____
###Markdown
Question 2'OOS' stands for 'Out of State'. Some stops led to arrests, while the others did not. Let us first get a glimpse of these for MT.
###Code
mt.out_of_state.value_counts()
mt.is_arrested.value_counts()
###Output
_____no_output_____
###Markdown
Now, we want to see whether a plate from outside Montana has a higher likelihood of resulting in an arrest after a stop. To do so, we use a Pearson's chi-square test of association, since both features are categorical.
###Code
# Conducting the likelihood ratio chi-square test
crosstab, LR = rpy.crosstab(mt['out_of_state'], mt['is_arrested'], test= "g-test")
LR
# Conducting the Pearson's chi-square test of independence
crosstab, chi2, expected = rpy.crosstab(mt['out_of_state'], mt['is_arrested'], test= "chi-square", expected_freqs= True)
chi2
###Output
_____no_output_____
###Markdown
It is concluded from the infinitesimal p-value that the null hypothesis of independence is rejected and these two attributes are correlated.Now, I must say that the term factor increase is a little vague for me. I would first think of it as percent change, but the attributes are categorical (this could be resolved by counting arrests periodically), and we are comparing two different attributes with each other. Therefore, let us assume it is defined as the ratio of the number of OOS plates for which there was an arrest over the number of OOS plates for which no arrest occurred. Using the contingency table derived a while ago, the factor increase would be as follows:
###Code
crosstab
print('Factor increase of arrests from OOS plates:', round(crosstab.iloc[1,1]/crosstab.iloc[1,0],2))
###Output
Factor increase of arrests from OOS plates: 0.02
###Markdown
Question 3The proportion of the stops in MT in which there was some sort of problem with the speeding of the driver is as below:
###Code
mt_speed = mt[mt.violation_raw.str.contains('SPEED',regex=True)==True]
print('Proportion of MT stops involving a speed-related violation:', round(len(mt_speed) / len(mt), 4))
###Output
_____no_output_____
###Markdown
Question 4Let us calculate the log likelihood ratio of Driving Under Influence (DUI) on MT over VT.
###Code
print('Number of stops due to DUI in MT:')
print(mt.violation.str.contains('DUI',regex=True).value_counts())
print('Number of stops due to DUI in VT:')
print(vt.violation.str.contains('DUI',regex=True).value_counts())
###Output
Number of stops due to DUI in MT:
False 816131
True 8914
Name: violation, dtype: int64
Number of stops due to DUI in VT:
False 280358
True 749
Name: violation, dtype: int64
###Markdown
With a similar understanding of the term 'factor increase' as before, we need to calculate the ratio of the DUI-related stop rate in MT to the corresponding rate in VT.
###Code
print('The factor increase of DUI-realated stops in MT over VT', round((8914/816131)/(749/280358),2))
###Output
The factor increase of DUI-realated stops in MT over VT 4.09
###Markdown
Question 5Let us first get the stops in 2020.
###Code
type(mt.stop_date[0])
mt_2020 = mt[mt.stop_date.str.contains('2020',regex=True)==True]
mt_2020.head()
###Output
_____no_output_____
###Markdown
The latest stops recorded in the dataset occurred in 2016 (mentioned in the README file of the data repo). We can solve the same question for 2016 for the sake of demonstration.
###Code
mt_2016 = mt[mt.stop_date.str.contains('2016',regex=True)==True]
mt_2016.head()
mt_2016[mt_2016.vehicle_year.notnull()].vehicle_year.astype('int32')
pd.DataFrame(mt_2016.vehicle_year.value_counts()).loc['UNK']
mt_2016[mt_2016.vehicle_year.notnull()].vehicle_year[mt_2016.vehicle_year!='UNK'].astype('int32')
print('The aveage of stopped vehicle manufacture year for 2016:',mt_2016[mt_2016.vehicle_year.notnull()].vehicle_year[mt_2016.vehicle_year!='UNK'].astype('int32').mean(axis=0))
###Output
The aveage of stopped vehicle manufacture year for 2016: 2005.8721284934402
###Markdown
In order to fit a regression, we consider x to be the year of vehicle manufacture and y to be the number of stops for vehicles of that year.
###Code
xy = pd.DataFrame(mt_2016[mt_2016.vehicle_year.notnull()].vehicle_year[mt_2016.vehicle_year!='UNK'].astype('int32').value_counts())
xy = xy.reset_index().rename(columns={'index':'vehicle_year', 'vehicle_year':'stops'})
x = xy.vehicle_year
y = xy.stops
slope, intercept, r_value, p_value, std_err = linregress(x, y)
print('The p-value for the regression is:', p_value)
###Output
The p-value for the regression is: 1.2627895092894874e-13
###Markdown
Question 5The question is a little vague, but we can interpret it as the difference between maximum daily stops for each state and the minimum daily stops for that state, which will be implemented as follows:* First we need to calculate the number of daily stops in each of the datasets. In order to do so, we can get a value count of each 'stop_date' stored in another dataframe. * Now we can subtract the max value from the min value for each of the dataframes.
###Code
daily_stops_mt = pd.DataFrame(mt.stop_date.value_counts()).sort_values(by=['stop_date'], ascending=False)
daily_stops_vt = pd.DataFrame(vt.stop_date.value_counts()).sort_values(by=['stop_date'], ascending=False)
print('The difference between maximum daily stops and minimum daily stops in MT', max(daily_stops_mt.stop_date)-min(daily_stops_mt.stop_date))
print('The difference between maximum daily stops and minimum daily stops in VT', max(daily_stops_vt.stop_date)-min(daily_stops_vt.stop_date))
###Output
The difference between maximum daily stops and minimum daily stops in MT 950
The difference between maximum daily stops and minimum daily stops in VT 488
###Markdown
Question 6One can make use of datasets that have the area information for the corresponding FIPS codes of the state of Montana. The information, however, can be easily accessed from the Wikipedia page below:https://en.wikipedia.org/wiki/List_of_counties_in_MontanaThe answer is found by scraping the table at that link.
###Code
stop_counties = pd.DataFrame(mt.county_name.value_counts()).reset_index().drop(columns=['county_name']).rename(columns={"index": "County"})
stop_counties.head()
stop_counties['Area'] = np.zeros(stop_counties.shape[0])
stop_counties.head()
page_url = 'https://en.wikipedia.org/wiki/List_of_counties_in_Montana'
table = pd.io.html.read_html(page_url, attrs={'class':'wikitable'})
mt_c = table[0][['County','Area']]
print(mt_c.shape)
mt_c.head()
for i in range(stop_counties.shape[0]):
    if mt_c.County.str.contains(stop_counties.County[i]).any()==True:
        stop_counties.loc[i, 'Area'] = mt_c.Area[mt_c.index[mt_c.County == stop_counties.County[i]][0]]
stop_counties.head()
stop_counties.drop(index=stop_counties.index[(stop_counties.Area==0)==True], inplace=True)
stop_counties.sort_values(by=['Area'], ascending=False).head(10)
###Output
_____no_output_____ |
05-AdalinePronostico.ipynb | ###Markdown
Adaptive Forecasting===**Juan David Velásquez Henao** [email protected] Universidad Nacional de Colombia, Sede Medellín Facultad de Minas Medellín, Colombia---Click [here](https://github.com/jdvelasq/deep-neural-nets/tree/master/) to access the online repository.Click [here](http://nbviewer.jupyter.org/github/jdvelasq/deep-neural-nets/tree/master/) to explore the repository using `nbviewer`. --- Problem definition There is a system that emits a signal $d(t)$ contaminated with noise, whose characteristics may change over time. A sample of a typical signal is shown in the plot below. For control purposes, a system is required that forecasts the current value of the signal, $d(t)$, from its past values $d(t-1)$, $d(t-2)$, ... To make the problem concrete, the signal $d(t)$ is generated below.
###Code
import numpy as np
import matplotlib.pyplot as plt
import math
%matplotlib inline
np.random.seed(12345)
d = [1.0 * math.sin(t * math.pi / 8) for t in range(64)]
d += [0.5 * math.sin(t * math.pi / 4) for t in range(64, 134)]
d += [0.8 * math.sin(t * math.pi / 12) for t in range(134, 250)]
d = [x + 0.06 * np.random.standard_normal() for x in d]
plt.figure(figsize=(11,3))
plt.plot(d, color='black');
###Output
_____no_output_____
###Markdown
Solution methodology The model presented below is a bipolar perceptron (with output $\in \{-1, +1\}$) with real-valued inputs and weights, which can be represented as shown below, where the net input is computed as:$$v = w_0 + \sum_{i=1}^n w_i x_i = \mathbf{w}^T \mathbf{x}$$with $\mathbf{w} = [w_0, ..., w_n]^T$ and $\mathbf{x} = [+1, x_1, x_2, ..., x_n]^T$. The activation rule is:$$\varphi(v) = \begin{cases} +1, & \text{if $v \ge 0$}\\ -1, & \text{if $v \lt 0$}\\\end{cases}$$ This type of perceptron can be used to represent bipolar logic functions:---**Exercise.--** Specify weights that represent the following functions:--- Adaptive linear combiner The first part of the bipolar perceptron corresponds to an adaptive linear combiner (ADALINE), while the activation function is a bipolar switch. The adaptive linear combiner can:* Adapt automatically to dynamic environments (self-optimization?).* Perform filtering, forecasting and decision-making tasks.* Extrapolate the behaviour of a system in order to handle new situations.* Reproduce the behaviour of nonlinear systems with time-varying parameters. An ADALINE is a closed-loop adaptive system. *Open-loop* systems rely on the following procedure:* Measure the information entering the system.* Apply a formula or algorithm.* Apply the result to adjust the adaptive system (the adaptation does not depend explicitly on the properties of the output).In closed-loop systems:* Automatic experimentation adjusts the adaptive system.* The desired result is compared with the output of the adaptive system in order to modify the settings. For the problem at hand, forecasting is performed with the following closed-loop adaptive system, in which the adaptive model is an ADALINE. Estimating the ADALINE weights with $\mu$-LMSTwo types of error are defined:* Linear error:$$e(k)=d(k)-y(k)=d(k)-\mathbf{w}^T (k) \mathbf{x}(k)=d(k)-\mathbf{x}^T(k)\mathbf{w}(k)$$* Squared error:$$e^2 (k)=d^2 (k)+\mathbf{x}^T (k)\mathbf{w}(k) \mathbf{w}^T (k) \mathbf{x}(k)-2d(k) \mathbf{x}^T (k)\mathbf{w}(k)$$ The weights can be estimated with the gradient-descent method, i.e. the $\mu$-LMS algorithm. In this case the error function is defined as $e^2_l(k)$, whose gradient is:$$\nabla \left(e^2_l(k) \right) = \frac{\partial e^2_l(k)}{\partial \mathbf{w} (k)} =\begin{bmatrix} \frac{\partial e^2_l(k)}{\partial w_0 (k)} \\ \vdots \\ \frac{\partial e^2_l(k)}{\partial w_n (k)} \end{bmatrix} $$ ---**Exercise.--** Explain the formula above.--- Then, since $e_l(k)=d(k)-\mathbf{x}^T(k)\mathbf{w}(k)$, this gradient equals $-2 e_l(k)\mathbf{x}(k)$, and the update rule is:$$\mathbf{w}(k+1) = \mathbf{w}(k) - \mu \frac{\partial e^2_l(k)}{\partial \mathbf{w} (k)} = \mathbf{w}(k) + 2 \mu e_l(k) \mathbf{x}(k) $$The algorithm converges if:$$0 < \mu < \frac{1}{1 + \sum_{i=0}^n E[x_i^2]}$$ Solution to the proposed problem using `numpy`.
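Before running the filter, a quick illustrative check of the step-size bound above for the signal generated earlier (this cell is an addition for clarity; the solution below simply fixes mu = 0.05):
###Code
# Illustrative check (not part of the original solution): evaluate the bound
# 0 < mu < 1 / (1 + sum_i E[x_i^2]) for inputs made of the bias term (+1)
# and the 5 previous samples of the generated signal d.
n_lags = 5
mean_sq = np.mean(np.array(d) ** 2)               # E[x_i^2] for each lagged input
mu_upper = 1.0 / (1.0 + 1.0 + n_lags * mean_sq)   # the first "+1" is the bias input x_0 = 1
print('Upper bound on mu:', mu_upper)
###Output
_____no_output_____
###Markdown
The value mu = 0.05 used below lies comfortably under this bound.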
###Code
## mu is the learning rate (chosen arbitrarily)
mu = 0.05
## To forecast the current value, the `L`
## previous values of the series are used
L = 5
## The initial weights of the ADALINE are
## set arbitrarily
## w is a column vector
w = np.array(0.1 * np.random.standard_normal(L+1)).reshape(L+1, 1)
y_pred = np.zeros(len(d))
y_pred[0:L] = np.nan
for t in range(L, len(d)):
    x = np.array([1] + d[t-L:t]) ## input to the ADALINE
    x = x.reshape(len(x), 1)
    p = w.T @ x ## forecast
    y_pred[t] = p
    ## learning algorithm
e = d[t] - p
w = w + 2 * mu * e * x
plt.figure(figsize=(14,3))
plt.plot(d, color='black');
plt.plot(y_pred, color = 'red');
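## (Added sketch, not part of the original solution) Empirically check the
## convergence condition stated above, 0 < mu < 1 / (1 + sum_{i=0..n} E[x_i^2]):
## the bias input contributes E[x_0^2] = 1 and each of the L lagged inputs
## is estimated here by the mean of d(t)^2 over the series.
mu_upper_bound = 1.0 / (1.0 + 1.0 + L * np.mean(np.array(d) ** 2))
print('approximate upper bound for mu:', mu_upper_bound)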
###Output
_____no_output_____ |
03 - Object Detection.ipynb | ###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to detect instances of objects in an image and to mark their location with a *bounding box*. You can think of object detection as a progression from *image classification* (which answers the question "what is in this image?") towards solutions that tell us which objects appear in an image and where they are located.

For example, a grocery store might use an object detection model to build an automated checkout system that scans a conveyor belt with a camera and recognizes the items on it, without each item having to be placed on the belt and scanned individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish the service for applications to use. You can use the same resource for both tasks, or use separate resources to manage costs separately, provided the resources are created in the same region. For these tasks you can use either a general **Cognitive Services** resource or a specific **Custom Vision** resource. Follow the steps below to create a new **Custom Vision** resource. You can also use an existing resource if you have one.
1. Open the Azure portal at [https://portal.azure.com](https://portal.azure.com) in a new browser tab, and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
    - **Create options**: Both
    - **Subscription**: *Your Azure subscription*
    - **Resource group**: *Select a resource group, or create one with a unique name*
    - **Name**: *Enter a unique name*
    - **Training location**: *Choose any available region*
    - **Training pricing tier**: F0
    - **Prediction location**: *The same as the training resource*
    - **Prediction pricing tier**: F0
    > **Note**: If your subscription already contains an F0 Custom Vision service, select **S0** here.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. You will use the Custom Vision portal to do this.
1. Open the Custom Vision portal at [https://customvision.ai](https://customvision.ai) in a new browser tab, and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
    - **Name**: Grocery Detection
    - **Description**: Object detection for groceries
    - **Resource**: *The Custom Vision resource you created previously*
    - **Project Types**: Object Detection
    - **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes the model should identify, and tag them to indicate the bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** As a temporary workaround, if you cannot access the training images, go to https://www.github.com, then go to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images from the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed, as in the image below. Then select the object and, if necessary, resize the region so that it surrounds the object.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*), as shown here:

6. Select and tag the other objects in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image and tag the objects it contains. Work through the entire image collection in this way, tagging every apple, banana, and orange.
8. When you have tagged all of the images, close the **Image Detail** editor and, on the **Training Images** page, under **Tags**, select **Tagged** to see your tagged images:

Train and test a model
Now that you have tagged the images in your project, you are ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it can take around ten minutes), and then review the *Precision*, *Recall*, and *mAP* performance metrics. These measure the prediction accuracy of the classification model and should all be high.
3. At the top right of the page, click **Quick Test**, enter `https://aka.ms/apple-orange` in the **Image URL** box, and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
You can now publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
    - **Model name**: detect-produce
    - **Prediction Resource**: *Your Custom Vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* icon (&9881;) at the top right of the **Performance** page to open the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5, replacing **YOUR_PROJECT_ID**.
> (*If you used a **Cognitive Services** resource at the beginning of this lab instead of creating a **Custom Vision** resource, you can copy its key and endpoint from the right side of the project settings, paste them into the code cell below, and run it to see the results. Otherwise, follow the steps below to get the key and endpoint for your Custom Vision prediction resource.*)
3. At the top left of the **Project Settings** page, click the *Projects Gallery* icon (&128065;) to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* icon (&9881;) to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values into the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource: did you use the **prediction** resource (not the training resource)?
5. Run the following code cell by clicking the Run Cell button &9655; at the top left of the cell to set your project ID, key, and endpoint values as variables.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
You can then use your key and endpoint with a Custom Vision client to connect to your Custom Vision object detection model.
Run the following code cell, which uses your model to detect individual produce items in an image.
> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for the detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and the coordinates of a *bounding box* that indicates where the detected object is located within the image. The code uses this information to draw labelled boxes around the individual objects in the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
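###Markdown
As an optional follow-on (a small sketch that is not part of the original lab), the `results.predictions` list returned above can also be summarised directly, for example by counting how many detections of each tag exceed the same 50% probability threshold used when drawing the boxes:
###Code
from collections import Counter

## Count predictions per tag above a probability threshold
## (reuses `results` from the cell above)
threshold = 0.5
tag_counts = Counter(p.tag_name for p in results.predictions if p.probability > threshold)
for tag, count in tag_counts.most_common():
    print('{}: {} detected'.format(tag, count))
###Output
_____no_output_____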
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image and to indicate a *bounding box* that marks their location. You can think of it as a progression from *image classification* (where the model answers the question "what is in this image?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera and identifies specific items, without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for both tasks, or use different resources to allocate costs separately, provided both are created in the same region. These resources can be general **Cognitive Services** resources or specific **Custom Vision** resources. Use the following instructions to create a new **Custom Vision** resource (or use an existing resource if you already have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com) and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
    - **Create options**: Both
    - **Subscription**: *Your Azure subscription*
    - **Resource group**: *Select or create a resource group with a unique name*
    - **Name**: *Enter a unique name*
    - **Training location**: *Choose any available region*
    - **Training pricing tier**: F0
    - **Prediction location**: *The same as the training location*
    - **Prediction pricing tier**: F0
    > **Note**: If you already have an F0 Custom Vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. You will use the Custom Vision portal to do this.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai) and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
    - **Name**: Grocery Detection
    - **Description**: Object detection for groceries.
    - **Resource**: *The Custom Vision resource you created previously*
    - **Project Types**: Object Detection
    - **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate the bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** As a temporary workaround, if you cannot access the training images, go to https://www.github.com and then to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images from the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed, as in the image below. Then select the object and, if necessary, resize the region so that it surrounds the object.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*), as shown here:

6. Select and tag the other objects in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image and tag the objects found in it. Keep working through the entire image collection, tagging every apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and, on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you have tagged the images in your project, you are ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics – they measure the prediction accuracy of the classification model and should all be high.
3. At the top right of the page, click **Quick Test**, then enter `https://aka.ms/apple-orange` in the **Image URL** box and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
Now you are ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
    - **Model name**: detect-produce
    - **Prediction Resource**: *Your Custom Vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* icon (&9881;) at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5, replacing **YOUR_PROJECT_ID**.
> (*If you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste them into the code cell below, and run it to see the results. Otherwise, continue with the steps below to get the key and endpoint for your Custom Vision prediction resource.*)
3. At the top left of the **Project Settings** page, click the *Projects Gallery* icon (&128065;) to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* icon (&9881;) to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values into the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell button &9655; (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your Custom Vision object detection model.
Run the code cell below, which uses your model to detect individual produce items in an image.
> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to send an image to your model and retrieve predictions for the detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and the coordinates of a *bounding box* that indicate where in the image the predicted object was detected. The code uses this information to draw a labelled box around each object in the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
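###Markdown
Optionally (a brief sketch, not part of the original lab), the same published model can be called with an image URL instead of a local file. Assuming the `detect_image_url` method of the Custom Vision prediction SDK is available in your installed version, the quick-test image used earlier can be scored from code like this:
###Code
## Score an image by URL with the published model (assumes detect_image_url is
## available in the installed azure-cognitiveservices-vision-customvision SDK)
url_results = predictor.detect_image_url(project_id, model_name, url='https://aka.ms/apple-orange')
for p in url_results.predictions:
    if p.probability > 0.5:
        print('{} ({:.1f}%)'.format(p.tag_name, p.probability * 100))
###Output
_____no_output_____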
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select or create a resource group with a unique name* - **Name**: *Enter a unique name* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. 
After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. 
Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
#pip install azure-cognitiveservices-vision-customvision
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
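###Markdown
As a small optional extension (a sketch, not part of the original exercise), the normalised bounding-box values used above can also be reported numerically – for example, keeping only the most confident prediction per tag and converting its box to pixel coordinates with the same image width/height scaling used in the drawing code:
###Code
## Keep the most confident prediction for each tag (reuses `results` from the cell above)
best_per_tag = {}
for p in results.predictions:
    if p.tag_name not in best_per_tag or p.probability > best_per_tag[p.tag_name].probability:
        best_per_tag[p.tag_name] = p

## Convert each normalised bounding box to pixel coordinates
for tag, p in best_per_tag.items():
    box = p.bounding_box
    print('{}: {:.1f}% at x={:.0f}, y={:.0f}, w={:.0f}, h={:.0f} pixels'.format(
        tag, p.probability * 100,
        box.left * test_img_w, box.top * test_img_h,
        box.width * test_img_w, box.height * test_img_h))
###Output
_____no_output_____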
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select existing resource group with name AI900-deploymentID* - **Name**: *object-deploymentID* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. 
In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) 
Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select or create a resource group with a unique name* - **Name**: *Enter a unique name* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. 
After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. 
Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = '1af054e0-dd9e-4f58-beb2-0ffbb607a357' # Replace with your project ID
cv_key = '1af054e0-dd9e-4f58-beb2-0ffbb607a357' # Replace with your prediction resource primary key
cv_endpoint = 'https://vision-services-paul.cognitiveservices.azure.com/' # Replace with your prediction resource endpoint
model_name = 'Grocery Detection' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
Ready to predict using model Grocery Detection in project 1af054e0-dd9e-4f58-beb2-0ffbb607a357
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
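###Markdown
If you want to test additional local images, one possible refactoring (a sketch, not part of the original lab) is to wrap the call used above in a small helper that returns only the predictions above a chosen probability threshold:
###Code
## Helper around the prediction client used above (names here are illustrative)
def detect_produce(image_path, threshold=0.5):
    with open(image_path, mode='rb') as image_data:
        result = predictor.detect_image(project_id, model_name, image_data)
    return [(p.tag_name, p.probability, p.bounding_box)
            for p in result.predictions if p.probability > threshold]

## Example: rerun the same test image used above
for tag, probability, box in detect_produce(test_img_file):
    print('{}: {:.1f}% (left={:.2f}, top={:.2f})'.format(tag, probability * 100, box.left, box.top))
###Output
_____no_output_____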
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select or create a resource group with a unique name* - **Name**: *Enter a unique name* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. 
After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. 
Run the code cell below by clicking the Run Cell &9655 button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = '3e7c7648-d3b6-466a-b7eb-a72991aad08f' # Replace with your project ID
cv_key = 'e7e4ae71082e4e46a08db90da670ffc3' # Replace with your prediction resource primary key
cv_endpoint = 'https://gowcustomvision-prediction.cognitiveservices.azure.com/' # Replace with your prediction resource endpoint
model_name = 'detect-produce2' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
Ready to predict using model detect-produce2 in project 3e7c7648-d3b6-466a-b7eb-a72991aad08f
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
#test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img_file = os.path.join('data', 'object-detection', 'minBanan.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"eple": "lightgreen",
"banan": "yellow",
"appelsin": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 40:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
Detecting objects in data/object-detection/minBanan.jpg using model detect-produce2 in project 3e7c7648-d3b6-466a-b7eb-a72991aad08f...
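###Markdown
> **Note**: If you want to inspect the raw predictions as text before (or instead of) plotting them, the following is a minimal sketch that assumes the cell above has just run, so `results`, `test_img_w`, and `test_img_h` are still in scope. It converts each normalized bounding box into pixel coordinates and prints one line per detection.
###Code
# A quick text dump of the predictions returned by the cell above.
# The service reports bounding boxes as fractions of the image size,
# so multiplying by the pixel dimensions gives absolute coordinates.
for p in sorted(results.predictions, key=lambda p: p.probability, reverse=True):
    left_px = int(p.bounding_box.left * test_img_w)
    top_px = int(p.bounding_box.top * test_img_h)
    width_px = int(p.bounding_box.width * test_img_w)
    height_px = int(p.bounding_box.height * test_img_h)
    print('{:<12} {:>6.2f}%  left={}, top={}, width={}, height={}'.format(
        p.tag_name, p.probability * 100, left_px, top_px, width_px, height_px))
###Output
_____no_output_____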
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image and to indicate a *bounding box* that marks their location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera and can identify specific items without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately, provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *Custom Vision*, and create a **Custom Vision** resource with the following settings:
- **Create options**: Both
- **Subscription**: *Your Azure subscription*
- **Resource group**: *Select or create a resource group with a unique name*
- **Name**: *Enter a unique name*
- **Training location**: *Choose any available region*
- **Training pricing tier**: F0
- **Prediction location**: *The same as the training location*
- **Prediction pricing tier**: F0
> **Note**: If you already have an F0 Custom Vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
- **Name**: Grocery Detection
- **Description**: Object detection for groceries.
- **Resource**: *The Custom Vision resource you created previously*
- **Project Types**: Object Detection
- **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** As a temporary workaround, if you cannot access the training images, go to https://www.github.com first and then to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object and, if necessary, resize the region to surround it.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:

6. Select and tag each other object in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image and tag its objects. Then keep working through the entire image collection, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and, on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you've tagged the images in your project, you're ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the object detection model and should all be high (a short illustration of precision and recall follows this list).
3. At the top right of the page, click **Quick Test**, then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
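As a rough illustration of what these metrics mean, the sketch below computes precision and recall from made-up counts of true positives, false positives, and false negatives; mAP (mean average precision) aggregates precision over a range of probability thresholds and tags.
###Code
# Illustration only: precision and recall from hypothetical detection counts.
tp, fp, fn = 18, 2, 3          # made-up true positives, false positives, false negatives
precision = tp / (tp + fp)     # of the boxes the model predicted, how many were correct
recall = tp / (tp + fn)        # of the real objects, how many the model found
print('Precision: {:.2f}  Recall: {:.2f}'.format(precision, recall))
###Output
_____no_output_____
###Markdown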
Publish and consume the object detection model
Now you're ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
- **Model name**: detect-produce
- **Prediction Resource**: *Your Custom Vision **prediction** resource*
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5, replacing **YOUR_PROJECT_ID**.
> (*If you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste them into the code cell below, and run it to see the results. Otherwise, continue with the steps below to get the key and endpoint for your Custom Vision prediction resource.*)
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your Custom Vision object detection model.
Run the following code cell, which uses your model to detect individual produce items in an image.
> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
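###Markdown
> **Note**: The SDK is a convenience wrapper over a REST endpoint, so the published model can also be called directly over HTTP. The sketch below is a rough, unofficial illustration using the `requests` package (assuming it is installed and that the variables from the cells above are in scope); the URL format and JSON field names are assumptions based on the v3.0 prediction API, so check the Prediction URL shown in the Custom Vision portal when you publish the model.
###Code
# Rough sketch of calling the published model over REST instead of the SDK.
# The URL format and JSON field names below are assumptions based on the
# v3.0 prediction API; verify against the Prediction URL shown in the portal.
import requests

prediction_url = '{}/customvision/v3.0/Prediction/{}/detect/iterations/{}/image'.format(
    cv_endpoint.rstrip('/'), project_id, model_name)
headers = {'Prediction-Key': cv_key, 'Content-Type': 'application/octet-stream'}

with open(test_img_file, mode='rb') as image_data:
    response = requests.post(prediction_url, headers=headers, data=image_data)
response.raise_for_status()

for p in response.json().get('predictions', []):
    print(p['tagName'], '{:.2f}%'.format(p['probability'] * 100), p['boundingBox'])
###Output
_____no_output_____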
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image and to include a *bounding box* that marks their location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a store could use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera and identifies the items on it without the need to place each item on the belt and scan them individually.
**Custom Vision**, part of Cognitive Services in Microsoft Azure, is a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately, provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource or a specific **Custom Vision** resource. Use these instructions to create a new **Custom Vision** resource (or you can use an existing resource if you already have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com) and sign in using the Microsoft account associated with your Azure subscription.
2. Click the **&65291;Create a resource** button, search for *Custom Vision*, and create a **Custom Vision** resource with the following settings:
- **Create options**: Both
- **Subscription**: *Your Azure subscription*
- **Resource group**: *Select or create a resource group with a unique name*
- **Name**: *Enter a unique name*
- **Training location**: *Choose any available region*
- **Training pricing tier**: F0
- **Prediction location**: *The same as the training location*
- **Prediction pricing tier**: F0
> **Note**: If you already have an F0 Custom Vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai) and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
- **Name**: Grocery Detection
- **Description**: Object detection for groceries.
- **Resource**: *The Custom Vision resource you created previously*
- **Project Types**: Object Detection
- **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes the model will have to identify, and tag them to indicate the bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** As a temporary workaround, if you cannot access the course images, go to https://www.github.com and then to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the cursor over any object in the image until an automatically detected region is displayed, as shown in the image below. Then select the object and, if necessary, resize the region to surround it.

Alternatively, you can drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*), as shown here:

6. Select and tag each other object in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image and tag its objects. Do the same with the rest of the images, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and, on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you've tagged the images in your project, it's time to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take around 10 minutes), and then review the *Precision*, *Recall*, and *mAP* performance metrics (these measure the prediction accuracy of the detection model and should all be high).
3. At the top right of the page, click **Quick Test**, then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
You can now publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
- **Model name**: detect-produce
- **Prediction Resource**: *Your Custom Vision **prediction** resource*
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *Settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5, replacing **YOUR_PROJECT_ID**.
> (*If you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of the exercise, you can copy its key and endpoint from the right side of the project settings, paste them into the code cell below, and run it to see the results. Otherwise, follow the remaining steps to get the key and endpoint for your Custom Vision prediction resource.*)
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project should now be listed.
4. On the Custom Vision portal home page, at the top right, click the *Settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your Custom Vision object detection model.
Run the following code cell, which uses your model to detect individual produce items in an image.
> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (in this example *apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
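###Markdown
> **Note**: Nothing limits you to a single test image. As a small sketch (assuming the `predictor`, `project_id`, and `model_name` variables from the cells above are in scope, and that the `data/object-detection` folder contains one or more .jpg files), you could loop over a folder and summarize the detections per image.
###Code
# Sketch: run the published model over every .jpg in a folder and summarize the results.
import os

test_folder = os.path.join('data', 'object-detection')
for file_name in sorted(os.listdir(test_folder)):
    if not file_name.lower().endswith('.jpg'):
        continue
    with open(os.path.join(test_folder, file_name), mode='rb') as image_data:
        detections = predictor.detect_image(project_id, model_name, image_data)
    confident = [p for p in detections.predictions if p.probability > 0.5]
    print('{}: {} object(s) above 50%'.format(file_name, len(confident)))
    for p in confident:
        print('   - {} ({:.2f}%)'.format(p.tag_name, p.probability * 100))
###Output
_____no_output_____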
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select or create a resource group with a unique name* - **Name**: *object-deploymentID* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. 
After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. 
Run the code cell below by clicking the Run Cell &9655 button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
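###Markdown
> **Note**: The bounding boxes can also be used to crop each detected object out of the image, for example to archive or further process individual items. This is a minimal sketch, assuming the previous cell has run so that `results`, `test_img_file`, `test_img_w`, and `test_img_h` are in scope; the `crops` output folder is just a hypothetical name.
###Code
# Sketch: crop each confidently detected object out of a clean copy of the test image.
import os
from PIL import Image

os.makedirs('crops', exist_ok=True)          # hypothetical output folder
clean_img = Image.open(test_img_file)        # reopen so the drawn boxes are not included
for i, p in enumerate(results.predictions):
    if p.probability < 0.5:
        continue
    left = int(p.bounding_box.left * test_img_w)
    top = int(p.bounding_box.top * test_img_h)
    right = left + int(p.bounding_box.width * test_img_w)
    bottom = top + int(p.bounding_box.height * test_img_h)
    crop = clean_img.crop((left, top, right, bottom))
    crop_path = os.path.join('crops', '{}_{}.png'.format(i, p.tag_name))
    crop.save(crop_path)
    print('Saved', crop_path)
###Output
_____no_output_____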
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the lab credentials.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Select the existing subscription where you are performing the lab*. - **Resource group**: *Select the existing resource group.* - **Name**: *objdet-uniqueID*, You can find the uniqueID value in the Lab Environment-> Environment details tab. - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the lab credentials.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *From the drop down menu, select the Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. 
In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) 
Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. Run the code cell below by clicking the Run Cell &9655 button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
#Replace the values YOUR_PROJECT_ID, YOUR_KEY and YOUR_ENDPOINT
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
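###Markdown
> **Note**: For a quick sanity check of a prediction, it can be handy to count how many instances of each tag were detected above a probability threshold. A small sketch, assuming `results` from the previous cell is in scope:
###Code
# Sketch: count detections per tag above a configurable probability threshold.
from collections import Counter

threshold = 0.5
counts = Counter(p.tag_name for p in results.predictions if p.probability > threshold)
for tag, count in counts.most_common():
    print('{}: {}'.format(tag, count))
###Output
_____no_output_____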
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select or create a resource group with a unique name* - **Name**: *Enter a unique name* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. 
In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.3. After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) 
Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. Run the code cell below by clicking the Run Cell &9655 button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
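###Markdown
The plot above draws a box for every prediction over the 50% threshold. If you also want a plain-text summary of what the model found, the same `results.predictions` list can simply be printed. This is a minimal sketch that reuses the objects created in the previous cell:
###Code
# Print a plain-text summary of the detections drawn above.
# Reuses `results` from the previous cell; the 50% threshold matches the plotting code.
for prediction in results.predictions:
    if prediction.probability * 100 > 50:
        box = prediction.bounding_box
        print('{}: {:.2f}% at (left={:.2f}, top={:.2f}, width={:.2f}, height={:.2f})'.format(
            prediction.tag_name,
            prediction.probability * 100,
            box.left, box.top, box.width, box.height))
###Output
_____no_output_____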
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and to indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately, provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291; Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
    - **Create options**: Both
    - **Subscription**: *Your Azure subscription*
    - **Resource group**: *Select or create a resource group with a unique name*
    - **Name**: *Enter a unique name*
    - **Training location**: *Choose any available region*
    - **Training pricing tier**: F0
    - **Prediction location**: *The same as the training location*
    - **Prediction pricing tier**: F0
> **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
    - **Name**: Grocery Detection
    - **Description**: Object detection for groceries.
    - **Resource**: *The Custom Vision resource you created previously*
    - **Project Types**: Object Detection
    - **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate the bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** as a temporary workaround, if you are not able to access the training images, please go to https://www.github.com, then go to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:

6. Select and tag each other object in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you've tagged the images in your project, you're ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.
3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
Now you're ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
    - **Model name**: detect-produce
    - **Prediction Resource**: *Your custom vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**.
> (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Vous pouvez maintenant utiliser votre clé et votre point de terminaison avec un client Vision personnalisée pour vous connecter à votre modèle de détection d’objets de vision personnalisée.
Exécutez la cellule de code suivante, qui utilise votre modèle pour détecter des produits individuels dans une image.
> **Remarque** : Ne vous souciez pas des détails du code. Il utilise le SDK Python pour le service Vision personnalisée pour soumettre une image à votre modèle et récupérer les prédictions pour les objets détectés. Chaque prédiction se compose d’un nom de classe (*pomme*, *banane*, ou *orange*) et les coordonnées *case de délimitation* qui indiquent où, dans l’image, l’objet prédit a été détecté. Le code utilise ensuite ces informations pour dessiner une boîte étiquetée autour de chaque objet sur l’image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".
For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
    - **Create options**: Both
    - **Subscription**: *Your Azure subscription*
    - **Resource group**: *Select or create a resource group with a unique name*
    - **Name**: *Enter a unique name*
    - **Training location**: *Choose any available region*
    - **Training pricing tier**: F0
    - **Prediction location**: *The same as the training location*
    - **Prediction pricing tier**: F0
> **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
    - **Name**: Grocery Detection
    - **Description**: Object detection for groceries.
    - **Resource**: *The Custom Vision resource you created previously*
    - **Project Types**: Object Detection
    - **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.
2. ![Upload the downloaded images by clicking Add images.](./images/fruit-upload.jpg)
In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.
3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.
Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:
6. Select and tag each other object in the image, resizing the regions and adding new tags as required.
7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:
Train and test a model
Now that you've tagged the images in your project, you're ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.
3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
Now you're ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
    - **Model name**: detect-produce
    - **Prediction Resource**: *Your custom vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**.
> (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655 button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
from learntools.core import binder; binder.bind(globals())
print(globals())
from learntools.mslearn_ai900.object_detection import *
print("Setup complete.")
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
q1.check()
###Output
_____no_output_____
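###Markdown
If you want to keep a copy of the annotated result, the matplotlib figure created above can be written to a file. A minimal sketch (the output file name is just an example):
###Code
# Save the annotated figure created in the previous cell.
# `fig` was created above; the file name here is only an example.
fig.savefig('produce_detected.jpg', bbox_inches='tight')
print('Saved annotated image to produce_detected.jpg')
###Output
_____no_output_____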
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
- **Create options**: Both
- **Subscription**: *Your Azure subscription*
- **Resource group**: *Select or create a resource group with a unique name*
- **Name**: *Enter a unique name*
- **Training location**: *Choose any available region*
- **Training pricing tier**: F0
- **Prediction location**: *The same as the training location*
- **Prediction pricing tier**: F0
> **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
- **Name**: Grocery Detection
- **Description**: Object detection for groceries.
- **Resource**: *The Custom Vision resource you created previously*
- **Project Types**: Object Detection
- **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** as a temporary workaround, if you are not able to access the training images, please go to https://www.github.com, then go to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:

6. Select and tag each other object in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you've tagged the images in your project, you're ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.
3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
Now you're ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
- **Model name**: detect-produce
- **Prediction Resource**: *Your custom vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**.
> (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655 button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
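###Markdown
Before moving on to the local test image, you can optionally repeat the portal's quick test from code, now that the project ID, key, endpoint, and model name are set. This sketch assumes the prediction SDK's `detect_image_url` method, which accepts the project ID, the published model name, and an image URL.
###Code
# Optional sketch: run the same quick test from Python using the prediction SDK.
# Reuses project_id, model_name, cv_key and cv_endpoint from the cell above.
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials

credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)

url_results = predictor.detect_image_url(project_id, model_name, url='https://aka.ms/apple-orange')
for prediction in url_results.predictions:
    if prediction.probability > 0.5:
        print('{} ({:.2f}%)'.format(prediction.tag_name, prediction.probability * 100))
###Output
_____no_output_____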
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and to indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately, provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
    - **Create options**: Both
    - **Subscription**: *Your Azure subscription*
    - **Resource group**: *Select or create a resource group with a unique name*
    - **Name**: *Enter a unique name*
    - **Training location**: *Choose any available region*
    - **Training pricing tier**: F0
    - **Prediction location**: *The same as the training location*
    - **Prediction pricing tier**: F0
> **Note**: If you already have an F0 custom vision service in your subscription, select **S0**.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
    - **Name**: Grocery Detection
    - **Description**: Object detection for groceries.
    - **Resource**: *The Custom Vision resource you created previously*
    - **Project Types**: Object Detection
    - **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate the bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** as a temporary workaround, if you are not able to access the training images, please go to https://www.github.com, then go to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:

6. Select and tag each other object in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you've tagged the images in your project, you're ready to train a model.
1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.
3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
Publish and consume the object detection model
Now you're ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
    - **Model name**: detect-produce
    - **Prediction Resource**: *Your custom vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**.
> (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
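###Markdown
Step 1 above published the trained iteration through the portal. For reference, the same operation can also be scripted with the Custom Vision *training* SDK. This is an optional sketch only; the training key, training endpoint, and the prediction resource ID (the full Azure resource ID shown in the portal) are placeholders, and the way the iteration is looked up is just one possibility.
###Code
# Optional sketch: publish a trained iteration with the training SDK instead of the portal.
# All YOUR_* values are placeholders; reuses project_id from the cell above.
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from msrest.authentication import ApiKeyCredentials

training_credentials = ApiKeyCredentials(in_headers={"Training-key": "YOUR_TRAINING_KEY"})
trainer = CustomVisionTrainingClient("YOUR_TRAINING_ENDPOINT", training_credentials)

# Pick the iteration you trained (here simply the first one returned) and publish it
# under the same name used elsewhere in this notebook ('detect-produce').
iteration = trainer.get_iterations(project_id)[0]
trainer.publish_iteration(project_id, iteration.id,
                          'detect-produce',
                          'YOUR_PREDICTION_RESOURCE_ID')  # full Azure resource ID of the prediction resource
print('Published iteration', iteration.name)
###Output
_____no_output_____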
###Markdown
Ora puoi utilizzare la chiave e l'endpoint con un client di Visione personalizzata per connetterti al tuo modello di rilevamento degli oggetti.
Esegui la seguente cella di codice, che usa il tuo modello per rilevare i singoli articoli di produzione in un'immagine.
> **Nota**: Non preoccuparti troppo dei dettagli del codice. Utilizza l'SDK Python per il servizio Visione personalizzata per inviare un'immagine al tuo modello e recuperare le previsioni per gli oggetti rilevati. Ogni previsione consiste in un nome di classe (*mela*, *banana* o *arancia*) e in coordinate del *riquadro delimitatore del testo* che indicano il punto dell'immagine in cui l'oggetto previsto è stato rilevato. Il codice usa quindi queste informazioni per disegnare un riquadro etichettato intorno a ciascun oggetto sull'immagine.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
Object Detection
*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and to indicate *bounding boxes* that mark the objects' locations. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".

For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.
The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models.
Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately, provided both resources are created in the same region. The resource for one (or both) of these tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
    - **Create options**: Both
    - **Subscription**: *Your Azure subscription*
    - **Resource group**: *Select or create a resource group with a unique name*
    - **Name**: *Enter a unique name*
    - **Training location**: *Choose any available region*
    - **Training pricing tier**: F0
    - **Prediction location**: *The same as the training location*
    - **Prediction pricing tier**: F0
> **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.
3. Wait for the resource to be created.
Create a Custom Vision project
To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.
2. Create a new project with the following settings:
    - **Name**: Grocery Detection
    - **Description**: Object detection for groceries.
    - **Resource**: *The Custom Vision resource you created previously*
    - **Project Types**: Object Detection
    - **Domains**: General
3. Wait for the project to be created and opened in the browser.
Add and tag images
To train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate the bounding boxes for each object instance.
1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit. **Note:** as a temporary workaround, if you are not able to access the training images, please go to https://www.github.com, then go to https://aka.ms/fruit-objects.
2. In the Custom Vision portal [https://customvision.ai](https://customvision.ai), make sure you are working in your object detection project _Grocery Detection_. Then select **Add images** and upload all of the images in the extracted folder.

3. After the images have been uploaded, select the first one to open it.
4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.

Alternatively, you can simply drag around the object to create a region.
5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:

6. Select and tag each object in the image, resizing the regions and adding new tags as required.

7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.
8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images:

Train and test a model
Now that you've tagged the images in your project, you're ready to train a model.
1. In the Custom Vision project, click the **Train** button above the images to train an object detection model using the tagged images. Select the **Quick Training** option.
2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.
3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window.
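The Quick Training run you started in step 1 can also be triggered and monitored from code with the Custom Vision *training* SDK. The following cell is an optional sketch only and is not required for this exercise; the training key, training endpoint, and project ID are placeholders, and the polling loop is just one way to wait for completion.
###Code
# Optional sketch: start training and wait for it to finish using the training SDK.
# YOUR_TRAINING_KEY / YOUR_TRAINING_ENDPOINT / YOUR_PROJECT_ID are placeholders.
import time
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from msrest.authentication import ApiKeyCredentials

training_credentials = ApiKeyCredentials(in_headers={"Training-key": "YOUR_TRAINING_KEY"})
trainer = CustomVisionTrainingClient("YOUR_TRAINING_ENDPOINT", training_credentials)

iteration = trainer.train_project("YOUR_PROJECT_ID")
while iteration.status != "Completed":
    time.sleep(10)
    iteration = trainer.get_iteration("YOUR_PROJECT_ID", iteration.id)
    print("Training status:", iteration.status)
print("Training finished.")
###Output
_____no_output_____
###Markdown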
Publish and consume the object detection model
Now you're ready to publish your trained model and use it from a client application.
1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings:
    - **Model name**: detect-produce
    - **Prediction Resource**: *Your custom vision **prediction** resource*.
(!) Check In
Did you use the same model name: **detect-produce**?
2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**.
> (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).
3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.
4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
(!) Check In
If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?
5. Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Теперь вы можете использовать ключ и конечную точку с клиентом службы пользовательского визуального распознавания для подключения к вашей модели обнаружения объектов пользовательского визуального распознавания.
Выполните код в следующей ячейке, которая использует вашу модель для обнаружения отдельных объектов на изображении.
> **Примечание**. Не стоит волноваться по поводу содержимого кода. Для службы пользовательского визуального распознавания в нем используется Python SDK, чтобы отправить изображение вашей модели и получить прогнозы для обнаруженных объектов. Каждый прогноз состоит из имени класса (*яблоко*, *банан* или *апельсин*) и координат *ограничивающего прямоугольника*, указывающего, где на изображении был обнаружен спрогнозированный объект. Затем код использует эту информацию, чтобы нарисовать помеченное окошко вокруг каждого объекта на изображении.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
物件偵測
*物件偵測*是一種電腦視覺形式,可在其中訓練機器學習模型,以便在影像中分類物件的個別執行個體,並指示標記物件位置的*周框方塊*您可以將其視為從*影像分類* (其中模型將回答「這是什麼的影像?」這一問題) 到組建解決方案的進展,在解決方案中我們可以詢問模型「影像中有什麼物件,以及這些物件在何處?」。

例如,雜貨店可能使用物件偵測模型來實作自動化結帳系統,該系統使用相機掃描輸送帶,無需將每件產品放置在傳動帶上就能識別特定商品,還能逐個掃描。
Microsoft Azure 中的**自訂視覺**認知服務為建立和發佈自訂物件偵測模型提供雲端式解決方案。
建立自訂視覺資源
若要使用自訂視覺服務,您需要可以用來訓練模型的 Azure 資源,以及可以發佈以供應用程式使用的資源。您可以將同一個資源用於每項工作,或者您可以為每項工作使用不同的資源來另行配置成本,前提是兩種資源均建立在同一區域。一項 (或者兩項) 工作的資源可以是一般性**認知服務**資源,或特定的**自訂視覺**資源。請使用以下指示來建立新**自訂視覺**資源 (或者如有現有資源,亦可使用)。
1. 在新的瀏覽器索引標籤中,透過 [https://portal.azure.com](https://portal.azure.com) 開啟 Azure 入口網站,並使用與您的 Azure 訂用帳戶關聯的 Microsoft 帳戶登入。
2. 選取 **[&65291; 建立資源]** 按鈕,搜尋*自訂視覺*,並建立包含以下設定的**自訂視覺**資源:
- **建立選項**:兩個
- **訂用帳戶**: *您的 Azure 訂用帳戶*
- **資源群組**: *選取或建立具有唯一名稱的資源群組*
- **名稱**: *輸入唯一名稱*
- **訓練位置**: *選擇任一可用區域*
- **訓練定價層**:F0
- **預測位置**: *與訓練位置一致*
- **預測定價層**:F0
> **備註**:若您的訂用帳戶中已經有 F0 自訂視覺服務,請為這一個選取 **[S0]**。
3. 等待資源建立。
建立自訂視覺專案
要訓練物件偵測模型,您需要以訓練資源為基礎建立自訂視覺專案。為此,您將使用自訂視覺入口網站。
1. 在新的瀏覽器索引標籤中,透過 [https://customvision.ai](https://customvision.ai) 開啟自訂視覺入口網站,並使用與您的 Azure 訂用帳戶關聯的 Microsoft 帳戶登入。
2. 建立一個包含以下設定的新專案:
- **名稱**:雜貨店偵測
- **描述**:針對雜貨店的物件偵測。
- **資源**: *先前建立的自訂視覺資源*
- **專案類型**:物件偵測
- **網域**:普通
3. 等待專案建立並在瀏覽器中開啟。
新增並標記影像
要訓練物件偵測模型,您需要上傳影像 (該影像包含需要模型識別的類別),並標記它們以指示每個物件執行個體的周框方塊。
1.從 https://aka.ms/fruit-objects 中下載並擷取訓練影像。已擷取的資料夾包含水果影像的集合物件。**注意:** 作為臨時解決辦法,如果您無法存取訓練影像,請前往 https://www.github.com,然後前往 https://aka.ms/fruit-objects。
2. 在自訂視覺入口網站 [https://customvision.ai](https://customvision.ai) 中,確保您正在處理物件偵測專案 _Grocery Detection_。然後選擇 **[新增影像]** 並上傳擷取的資料夾中的所有影像。
![透過按一下 [新增影像] 上傳下載的影像。](./images/fruit-upload.jpg)
3. 影像上傳完成後,選取第一個以開啟它。
4. 將滑鼠游標暫留在影像中的任一物件上,直到顯示自動偵測的區域,如下面的影像所示。然後選取物件,如有必要調整區域大小以便將其環繞。

或者,您可以簡單地在物件周圍拖曳以建立區域。
5. 當區域環繞物件時,新增具有適當物件類型 (*蘋果*、*香蕉*或*柳橙*) 的新標籤,如下所示:

6. 選取並標記影像中的每個其他物件,調整區域大小並根據需要新增新標籤。

7. 使用右側的**>**連結轉到下一個影像,並標記其物件。然後,只需繼續處理整個影像集合物件,標記每個蘋果、香蕉和柳橙即可。
8. 完成對最後一個影像的標記後,請關閉 **[影像詳細資料]** 編輯器,然後在 **[訓練影像]** 頁面上的 **[標記]** 下,選取 **[已標記]** 以查看所有已標記的影像:

訓練並測試模型
現在您已經在專案中標記影像,可以開始訓練模型。
1. 在自訂視覺專案中,按一下 **[訓練]** 以使用已標記的影像訓練物件偵測模型。選取 **[快速訓練]** 選項。
2. 等待訓練完成 (這可能需要十分鐘左右),*然後檢閱精確度*、*重新叫用*和*對應*效能計量,這些計量測量分類模型的預測正確性,各項計量應該都較高。
3. 在頁面的右上方,按一下 **[快速測試]**,然後在 **[影像 URL]** 方塊中,輸入 `https://aka.ms/apple-orange` 並檢視所生成的預測。然後關閉 **[快速測試]** 視窗。
發佈並使用物件偵測模型
現在,您可以準備發佈已訓練的模型,還可以從用戶端應用程式中使用該模型。
1. 在 **[效能]** 頁面的左上方,按一下 **[&128504; 發佈]** 以發佈包含以下設定的已訓練模型:
 - **模型名稱**:detect-produce
- **預測資源**: *您的自訂視覺**預測**資源*。
(!)簽入
您是否使用了相同的模型名稱:**detect-produce**?
2. 發佈後,在 **[效能]** 頁面右上方按一下*設定* (&9881;) 圖示,以檢視專案設定。然後,在 **[一般]** 下 (在左側),複製**專案識別碼**。向下捲動並將其貼上到步驟 5 下面的程式碼儲存格中,取代 **YOUR_PROJECT_ID**。
> (如果您在本次練習開始時使用了**認知服務**資源,而不是建立**自訂視覺**資源,則可以從專案設定的右側複製其金鑰和端點,將其貼上到下面的程式碼儲存格中,然後執行該資源以查看結果。否則,請繼續完成以下步驟以獲取自訂視覺預測資源的金鑰和*端點*)。
3. 在 **[專案設定]** 頁面的左上方,按一下*專案資源庫*(&128065;) 圖示以退回到自訂視覺入口網站首頁,現在您的專案已在其中列出。
4. 在自訂視覺入口網站首頁的右上方,按一下*設定*(&9881;) 圖示以檢視自訂視覺服務的設定。然後,在**資源**下方,展開您的**預測**資源 (而非訓練資源) 並將其**金鑰**和**端點**值複製到步驟 5 下面的程式碼儲存格,取代 **YOUR_KEY** 和 **YOUR_ENDPOINT**。
(!)簽入
若您使用的是**自訂視覺**資源,您是否使用了**預測**資源 (而非 訓練資源)?
5. 透過按一下 [執行儲存格] &9655; 按鈕 (位於儲存格左上方) 執行下面的程式碼儲存格來為您的專案識別碼、金鑰和端點值設定變數。
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
現在,您可以將金鑰和端點與自訂視覺用戶端一起使用,以連線到您的自訂視覺物件偵測模型。
執行以下程式碼儲存格,其使用您的模型來偵測影像中的個別產品。
> **備註**:請勿過於擔心程式碼的詳細資料。其將 Python SDK 用於自訂視覺服務,以向您的模型提交影像並取出針對已偵測物件的預測。每個預測都由一個類別名稱 (蘋果**、香蕉**或柳橙**) 和周框方塊**座標組成,這些座標指示影像中已偵測到的預測物件之位置。然後,程式碼使用此資訊在影像上的每個物件周圍繪製一個帶標籤的方塊。
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
개체 감지
*개체 감지*는 이미지에서 개별 개체 인스턴스를 분류하고 해당 위치를 나타내는 *경계 상자*를 표시하도록 기계 학습 모델이 학습된 Computer Vision 형태입니다. *이미지 분류*("무엇에 대한 이미지인가?"라는 질문에 답변하는 모델)가 진화하여 "이 이미지에 어떤 개체가 어디에 있는가?"를 모델에 물어볼 수 있는 솔루션으로 발전한 것이라 생각하면 됩니다.

예를 들어 식료품점은 개체 감지 모델을 사용함으로써 카메라를 통해 컨베이어 벨트를 스캔하는 자동 계산 시스템을 구현할 수 있으며, 각 품목을 벨트에 놓고 따로 스캔할 필요 없이 특정 품목을 식별할 수 있습니다.
Microsoft Azure의 **Custom Vision** Cognitive Service는 사용자 지정 개체 감지 모델을 만들고 게시하기 위한 클라우드 기반 솔루션을 제공합니다.
Custom Vision 리소스 만들기
Custom Vision 서비스를 사용하려면 모델을 학습시키는 데 사용할 수 있는 Azure 리소스 그리고 애플리케이션에서 사용하도록 모델을 게시할 수 있는 리소스가 필요합니다. 이러한 각 작업에 동일한 리소스를 사용하거나, 두 리소스가 동일한 영역에서 생성된 경우 각각에 대해 다른 리소스를 사용하여 비용을 따로 할당할 수 있습니다. 두 작업 중 하나 또는 둘 다에 사용되는 리소스는 일반 **Cognitive Services** 리소스일 수도 있고, 특정 **Custom Vision** 리소스일 수도 있습니다. 다음 지침을 따라 새로운 **Custom Vision** 리소스를 만듭니다(기존 리소스가 있으면 이를 사용할 수도 있음).
1. 새 브라우저 탭에서 Azure Portal([https://portal.azure.com](https://portal.azure.com)) 을 열고, Azure 구독과 연결된 Microsoft 계정을 사용하여 로그인합니다.
2. **&65291;리소스 만들기** 단추를 선택하고, *Custom Vision*을 검색하고, 다음 설정을 사용하여 **Custom Vision** 리소스를 만듭니다.
- **옵션 만들기**: 모두
- **구독**: *사용자의 Azure 구독*
- **리소스 그룹**: *고유한 이름의 새 리소스 그룹 선택 또는 만들기*
- **이름**: *고유한 이름 입력*
- **학습 위치**: *사용 가능한 지역 선택*
- **학습 가격 책정 계층**: F0
- **예측 위치**: *학습 위치와 동일*
- **예측 가격 책정 계층**: F0
> **참고**: 구독에 F0 Custom Vision 서비스가 이미 있는 경우에는 해당 서비스에 대해 **S0**을 선택합니다.
3. 리소스가 생성될 때까지 기다립니다.
Custom Vision 프로젝트 만들기
개체 감지 모델을 학습시키기 위해 학습 리소스를 기반으로 Custom Vision 프로젝트를 만들어야 합니다. 이를 위해 Custom Vision 포털을 사용합니다.
1. 새 브라우저 탭에서 Custom Vision Portal([https://customvision.ai](https://customvision.ai)) 을 열고, Azure 구독과 연결된 Microsoft 계정을 사용하여 로그인합니다.
2. 다음 설정을 사용하여 새 프로젝트를 만듭니다.
- **이름**: Grocery Detection
- **설명**: 식료품을 위한 개체 감지.
- **리소스**: *앞서 만든 Custom Vision 리소스*
- **프로젝트 유형**: 개체 감지
- **도메인**: 일반
3. 프로젝트가 생성되고 브라우저에서 열릴 때까지 기다립니다.
이미지 추가 및 태깅
개체 감지 모델을 학습시키려면 모델이 식별하도록 할 클래스가 포함된 이미지를 업로드하고, 각 개체 인스턴스의 경계 상자를 나타내도록 태깅해야 합니다.
1. https://aka.ms/fruit-objects에서 학습 이미지를 다운로드하고 압축 해제합니다. 압축 해제된 폴더에는 과일 이미지 모음이 들어 있습니다. **참고:** 임시 해결 방법으로는 학습 이미지에 액세스할 수 없는 경우 https://www.github.com으로 이동한 다음 https://aka.ms/fruit-objects로 이동합니다.
2. 사용자 지정 Vision 포털([https://customvision.ai](https://customvision.ai))에서는 개체 감지 프로젝트 _Grocery Detection_에서 작업하고 있는지 확인합니다. **이미지 추가**를 선택하고 압축을 해제한 폴더의 모든 이미지를 업로드합니다.

3. 이미지를 업로드한 후에는 첫 번째 이미지를 선택하여 엽니다.
4. 아래 이미지와 같이 자동으로 감지된 영역이 표시될 때까지 이미지 안의 개체 위에 마우스를 올려 놓습니다. 그런 다음에 개체를 선택하고, 필요하면 주변을 둘러싸도록 영역 크기를 조정합니다.

또는 간단히 개체를 끌어서 영역을 만들 수 있습니다.
5. 영역이 개체를 둘러싸게 되면 여기에 나온 것처럼 적절한 개체 유형의 새로운 태그(*apple*, *banana* 또는 *orange*)를 추가합니다.

6. 이미지에서 각 개체를 선택하고 태깅하고, 필요에 따라 영역 크기를 조정하고 새로운 태그를 추가합니다.

7. 오른쪽의 **>** 링크를 사용하여 다음 이미지로 이동하고 해당 개체를 태깅합니다. 이런 식으로 전체 이미지 모음이 완료될 때까지 진행하면서 각각의 사과, 바나나, 오렌지를 태깅합니다.
8. 마지막 이미지의 태깅을 마쳤으면 **이미지 세부 사항** 편집기를 닫고 **학습 이미지** 페이지의 **태그** 아래에서 **태깅됨**을 선택하여 태깅된 모든 이미지를 표시합니다.

모델 학습 및 테스트
이제 프로젝트에서 이미지를 태깅했으므로 모델을 학습시킬 준비가 되셨습니다.
1. Custom Vision 프로젝트에서 **학습**을 클릭하여 태깅된 이미지로 개체 감지 모델을 학습시킵니다. **빠른 학습** 옵션을 선택합니다.
2. 학습이 완료될 때까지 기다리고(10분 정도 걸릴 수 있음) *정확성*, *리콜* 및 *mAP* 성능 메트릭을 검토합니다. 이러한 메트릭은 분류 모델의 예측 정확도를 측정하며 모두 높아야 합니다.
3. 페이지 오른쪽 상단에서 **빠른 테스트**를 클릭하고, **이미지 URL** 상자에 `https://aka.ms/apple-orange` 를 입력하고, 생성되는 예측을 봅니다. 그런 다음에 **빠른 테스트** 창을 닫습니다.
개체 감지 모델 게시 및 소비
이제 학습된 모델을 게시하고 클라이언트 애플리케이션에서 사용할 준비가 되었습니다.
1. **성능** 페이지의 왼쪽 상단에서 **&128504; 게시**를 클릭하여 학습된 모델을 다음 설정과 함께 게시합니다.
- **모델 이름**: detect-produce
- **예측 리소스**: *Custom Vision **예측** 리소스*.
(!) 체크 인
동일한 모델 이름, **detect-produce**를 사용하셨습니까?
2. 게시한 후에 **성능** 페이지 오른쪽 상단의 *설정*(&9881;) 아이콘을 클릭하여 프로젝트 설정을 봅니다. 그런 다음에 **일반**(왼쪽에 있음) 아래에서 **프로젝트 ID**를 복사합니다. 아래로 스크롤하고 5단계 아래의 코드 셀에 붙여 넣어 **YOUR_PROJECT_ID**를 대체합니다.
> (*이 연습을 시작할 때 **Custom Vision** 리소스를 만드는 대신 **Cognitive Services** 리소스를 사용한 경우에는 프로젝트 설정의 오른쪽에서 해당 키 및 엔드포인트를 복사하고, 아래의 코드 셀에 붙여 넣고, 이를 실행하여 결과를 볼 수 있습니다. 아니면 계속해서 아래의 단계를 완료하여 Custom Vision 예측 리소스의 키와 엔드포인트를 가져오세요*).
3. **프로젝트 설정** 페이지의 왼쪽 상단에서 *프로젝트 갤러리*(&128065;) 아이콘을 클릭하여 Custom Vision 포털 홈 페이지로 돌아가면 이제 프로젝트가 나열되어 있습니다.
4. Custom Vision 포털 홈 페이지의 오른쪽 상단에서 *설정*(&9881;) 아이콘을 클릭하여 Custom Vision 서비스의 설정을 봅니다. 그런 다음에 **리소스** 아래에서 *예측* 리소스(학습 리소스가 아님)를 확장하고 해당 **키** 및 **엔드포인트** 값을 5단계 아래의 코드 셀에 붙여 넣어 **YOUR_KEY** 및 **YOUR_ENDPOINT**를 대체합니다.
(!) 체크 인
**Custom Vision** 리소스를 사용하는 중이라면 **예측** 리소스(학습 리소스가 아님)를 사용하셨습니까?
5. 셀 실행 &9655; 단추(셀 왼쪽 상단에 있음)를 클릭하여 변수를 프로젝트 ID, 키 및 엔드포인트 값으로 설정하고 아래의 코드 셀을 실행합니다.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
이제 키와 엔드포인트를 Custom Vision 클라이언트와 함께 사용하여 Custom Vision 개체 감지 모델에 연결할 수 있습니다.
직접 만든 모델을 사용하여 이미지에서 개별 생산품을 감지하는 다음 코드 셀을 실행합니다.
> **참고**: 코드의 세부 사항에 대해 너무 걱정하지 마세요. Python SDK for the Custom Vision 서비스를 사용하여 모델에 이미지를 제출하며 감지된 개체에 대한 예측을 검색합니다. 각각의 예측은 클래스 이름(*apple*, *banana*, *orange*) 그리고 이미지 내에서 예측된 개체가 감지된 위치를 나타내는 *경계 상자* 좌표로 구성됩니다. 그런 다음에 코드는 이 정보를 사용하여 레이블이 붙은 상자를 이미지의 각 개체 주변으로 끌어다 놓습니다.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
Object Detection*Object detection* is a form of computer vision in which a machine learning model is trained to classify individual instances of objects in an image, and indicate a *bounding box* that marks its location. You can think of this as a progression from *image classification* (in which the model answers the question "what is this an image of?") to building solutions where we can ask the model "what objects are in this image, and where are they?".For example, a grocery store might use an object detection model to implement an automated checkout system that scans a conveyor belt using a camera, and can identify specific items without the need to place each item on the belt and scan them individually.The **Custom Vision** cognitive service in Microsoft Azure provides a cloud-based solution for creating and publishing custom object detection models. Create a Custom Vision resourceTo use the Custom Vision service, you need an Azure resource that you can use to train a model, and a resource with which you can publish it for applications to use. You can use the same resource for each of these tasks, or you can use different resources for each to allocate costs separately provided both resources are created in the same region. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. Use the following instructions to create a new **Custom Vision** resource (or you can use an existing resource if you have one).1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.2. Select the **&65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: - **Create options**: Both - **Subscription**: *Your Azure subscription* - **Resource group**: *Select or create a resource group with a unique name* - **Name**: *Enter a unique name* - **Training location**: *Choose any available region* - **Training pricing tier**: F0 - **Prediction location**: *The same as the training location* - **Prediction pricing tier**: F0 > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.3. Wait for the resource to be created. Create a Custom Vision projectTo train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.1. In a new browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai), and sign in using the Microsoft account associated with your Azure subscription.2. Create a new project with the following settings: - **Name**: Grocery Detection - **Description**: Object detection for groceries. - **Resource**: *The Custom Vision resource you created previously* - **Project Types**: Object Detection - **Domains**: General3. Wait for the project to be created and opened in the browser. Add and tag imagesTo train an object detection model, you need to upload images that contain the classes you want the model to identify, and tag them to indicate bounding boxes for each object instance.1. Download and extract the training images from https://aka.ms/fruit-objects. The extracted folder contains a collection of images of fruit.2. In the Custom Vision portal, in your object detection project, select **Add images** and upload all of the images in the extracted folder.3. 
After the images have been uploaded, select the first one to open it.4. Hold the mouse over any object in the image until an automatically detected region is displayed like the image below. Then select the object, and if necessary resize the region to surround it.Alternatively, you can simply drag around the object to create a region.5. When the region surrounds the object, add a new tag with the appropriate object type (*apple*, *banana*, or *orange*) as shown here:6. Select and tag each other object in the image, resizing the regions and adding new tags as required.7. Use the **>** link on the right to go to the next image, and tag its objects. Then just keep working through the entire image collection, tagging each apple, banana, and orange.8. When you have finished tagging the last image, close the **Image Detail** editor and on the **Training Images** page, under **Tags**, select **Tagged** to see all of your tagged images: Train and test a modelNow that you've tagged the images in your project, you're ready to train a model.1. In the Custom Vision project, click **Train** to train an object detection model using the tagged images. Select the **Quick Training** option.2. Wait for training to complete (it might take ten minutes or so), and then review the *Precision*, *Recall*, and *mAP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.3. At the top right of the page, click **Quick Test**, and then in the **Image URL** box, enter `https://aka.ms/apple-orange` and view the prediction that is generated. Then close the **Quick Test** window. Publish and consume the object detection modelNow you're ready to publish your trained model and use it from a client application.1. At the top left of the **Performance** page, click **&128504; Publish** to publish the trained model with the following settings: - **Model name**: detect-produce - **Prediction Resource**: *Your custom vision **prediction** resource*. (!) Check In Did you use the same model name: **detect-produce**? 2. After publishing, click the *settings* (&9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id**. Scroll down and paste it into the code cell below step 5 replacing **YOUR_PROJECT_ID**. > (*if you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource*).3. At the top left of the **Project Settings** page, click the *Projects Gallery* (&128065;) icon to return to the Custom Vision portal home page, where your project is now listed.4. On the Custom Vision portal home page, at the top right, click the *settings* (&9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (not the training resource) and copy its **Key** and **Endpoint** values to the code cell below step 5, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. (!) Check In If you are using a **Custom Vision** resource, did you use the **prediction** resource (not the training resource)?5. 
Run the code cell below by clicking the Run Cell &9655; button (at the top left of the cell) to set the variables to your project ID, key, and endpoint values.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision object detection model.Run the following code cell, which uses your model to detect individual produce items in an image.> **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding box* coordinates that indicate where in the image the predicted object has been detected. The code then uses this information to draw a labelled box around each object on the image.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
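###Markdown
The same prediction client can also score a publicly accessible image by URL instead of uploading local bytes. A minimal optional sketch (assuming the installed SDK version exposes `detect_image_url`), re-using the quick-test image from earlier:
###Code
# Sketch: score an image by URL with the same published model
url_results = predictor.detect_image_url(project_id, model_name, url='https://aka.ms/apple-orange')
for p in url_results.predictions:
    if p.probability > 0.5:
        print('{}: {:.2f}%'.format(p.tag_name, p.probability * 100))
###Output
_____no_output_____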
###Markdown
Deteksi Objek
*Deteksi objek* adalah bentuk Visi Komputer tempat model pembelajaran mesin dilatih untuk mengklasifikasikan setiap instans dari objek pada gambar, dan menunjukkan *kotak pembatas* yang menandai lokasinya. Anda dapat menganggap ini sebagai perkembangan dari *klasifikasi gambar* (di mana model menjawab pertanyaan "gambar apakah ini?") untuk membuat solusi agar kita dapat menanyakan pada model "objek apa yang ada pada gambar ini, dan di mana mereka?".

Misalnya, toko kelontong dapat menggunakan model deteksi objek untuk menerapkan sistem checkout otomatis yang memindai ban berjalan menggunakan kamera, dan dapat mengidentifikasi item tertentu tanpa perlu menempatkan masing-masing item pada ban dan memindainya satu per satu.
Layanan kognitif **Custom Vision** di Microsoft Azure memberikan solusi berbasis awan untuk membuat dan memublikasikan model deteksi objek kustom.
Membuat sumber daya Custom Vision
Untuk menggunakan layanan Custom Vision, Anda memerlukan sumber daya Azure yang dapat Anda gunakan untuk melatih model, dan sumber daya yang dapat dipublikasikan agar dapat digunakan aplikasi. Anda dapat menggunakan sumber daya yang sama untuk masing-masing tugas ini, atau menggunakan sumber daya berbeda untuk masing-masing guna mengalokasikan biaya secara terpisah yang diberikan kedua sumber daya yang dibuat di wilayah yang sama. Sumber daya untuk salah satu (atau keduanya) tugas dapat berupa sumber daya **Cognitive Services** umum, atau sumber daya **Custom Vision** khusus. Gunakan petunjuk berikut untuk membuat sumber daya **Custom Vision** yang baru. (Atau Anda dapat menggunakan sumber daya yang ada jika memiliki satu).
1. Di tab browser baru, buka portal Azure di [https://portal.azure.com](https://portal.azure.com), dan masuk menggunakan akun Microsoft yang terkait dengan langganan Azure Anda.
2. Pilih tombol **&65291;Buat sumber daya**, cari *custom vision*, dan buat sumber daya **Custom Vision** dengan pengaturan berikut:
- **Buat opsi**: Keduanya
- **Langganan**: *Langganan Azure Anda*
- **Grup sumber daya**: *Pilih atau buat grup sumber daya dengan nama unik*
- **Nama**: *Masukkan nama unik*
- **Lokasi pelatihan**: *Pilih wilayah yang tersedia*
- **Tingkat harga pelatihan**: F0
- **Lokasi prediksi**: *Sama dengan lokasi pelatihan*
- **Tingkat harga prediksi**: F0
>Catatan: Jika Anda sudah memiliki layanan custom vision F0 di langganan, pilih **S0** untuk yang satu ini.
3. Tunggu hingga sumber daya dibuat.
Membuat proyek Custom Vision
Untuk melatih model deteksi objek, Anda harus membuat proyek Custom Vision berdasarkan sumber daya latihan. Untuk melakukannya, Anda akan menggunakan portal Custom Vision.
1. Di tab browser baru, buka portal Custom Vision di [https://customvision.ai](https://customvision.ai), dan masuk menggunakan akun Microsoft yang terkait dengan langganan Azure Anda.
2. Membuat proyek baru dengan pengaturan berikut:
- **Nama**: Deteksi Belanjaan
- **Deskripsi**: Deteksi objek untuk belanjaan.
- **Sumber daya**: *Sumber daya Custom Vision yang Anda buat sebelumnya*
- **Jenis Proyek**: Deteksi Objek
- **Domain**: Umum
3. Tunggu hingga proyek dibuat dan dibuka di browser.
Menambah dan menandai gambar
Untuk melatih model deteksi objek, Anda harus mengunggah gambar yang berisi kelas yang diinginkan untuk diidentifikasi oleh model, dan menandainya untuk menunjukkan kotak pembatas bagi masing-masing instans objek.
1. Unduh dan ekstrak gambar pelatihan dari https://aka.ms/fruit-objects. Folder yang diekstrak berisi kumpulan gambar buah. **Catatan:** sebagai solusi sementara, jika Anda tidak dapat mengakses gambar pelatihan, buka https://www.github.com, lalu buka https://aka.ms/fruit-objects.
2. Di portal Custom Vision [https://customvision.ai](https://customvision.ai), pastikan Anda bekerja di proyek deteksi objek _Grocery Detection_. Kemudian pilih **Tambah gambar** dan unggah semua gambar dalam folder yang diekstrak.

3. Setelah gambar diunggah, pilih yang pertama untuk membukanya.
4. Tahan mouse di atas objek apa pun pada gambar hingga wilayah yang terdeteksi secara otomatis ditampilkan seperti gambar di bawah. Lalu pilih objek, dan jika perlu ubah ukuran wilayah di sekitarnya.

Selain itu, Anda dapat menyeret di sekitar objek untuk membuat wilayah.
5. Bila wilayah mengelilingi objek, tambah tanda baru dengan jenis objek yang sesuai (*apel*, *pisang*, atau *jeruk*) seperti ditampilkan di sini:

6. Pilih dan tandai masing-masing objek lain pada gambar, ubah ukuran wilayah dan tambah tanda baru bila perlu.

7. Gunakan tautan **>** di sebelah kanan untuk membuka gambar berikutnya, dan menandai objeknya. Lalu, terus kerjakan hingga seluruh kumpulan gambar, memberi tanda pada setiap apel, pisang, dan jeruk.
8. Saat Anda selesai menandai gambar terakhir, tutup editor **Detal Gambar** dan di halaman **Gambar Pelatihan**, di bawah **Tanda**, pilih **Ditandai** untuk melihat semua gambar yang ditandai:

Melatih dan menguji model
Sekarang setelah menandai gambar di proyek, Anda siap untuk melatih model.
1. Di proyek Custom Vision, klik **Latih** untuk melatih model deteksi objek menggunakan gambar yang ditandai. Pilih opsi **Pelatihan Cepat**.
2. Tunggu hingga pelatihan selesai (perlu waktu kurang lebih sepuluh menit), lalu tinjau metrik performa *Presisi*, *Pendahuluan*, dan *mAP* - ketiganya mengukur akurasi prediksi model klasifikasi, dan semuanya harus tinggi.
3. Di sebelah kanan atas halaman, klik **Uji Cepat**, lalu di kotak **URL Gambar**, masukkan `https://aka.ms/apple-orange` dan lihat prediksi yang dihasilkan. Lalu, tutup jendela **Uji Cepat**.
Memublikasikan dan mengggunakan model deteksi objek
Sekarang, Anda siap untuk memublikasikan model latihan Anda dan menggunakannya dari aplikasi klien.
1. Di bagian kiri atas halaman **Performa**, klik **&128504; Publikasikan** untuk memublikasikan model yang telah dilatih dengan pengaturan berikut:
 - **Nama model**: detect-produce
- **Sumber Daya Prediksi**: *Sumber daya **prediksi** Custom Vision Anda*.
(!) Cek Masuk
Apakah Anda menggunakan nama model yang sama: **detect-produce**?
2. Setelah memublikasikan, klik ikon *pengaturan* (&9881;) di bagian kanan atas halaman **Performa** untuk melihat pengaturan proyek. Lalu, di **Umum** (di kiri), salin **ID Proyek**. Gulir ke bawah dan tempel ke sel kode di bawah langkah 5 menggantikan **YOUR_PROJECT_ID**.
> (*jika Anda menggunakan sumber daya **Cognitive Services** daripada membuat sumber daya **Custom Vision** di awal latihan ini, Anda dapat menyalin kunci dan titik akhirnya dari sisi kanan pengaturan proyek, menempelnya ke sel kode di bawah, dan menjalankannya untuk melihat hasilnya. Jika tidak, lanjutkan menyelesaikan langkah-langkah di bawah untuk mendapatkan kunci dan titik akhir untuk sumber daya prediksi Custom Vision Anda*).
3. Di bagian kiri atas halaman **Pengaturan Proyek**, klik ikon *Galeri Proyek* (&128065;) untuk kembali ke halaman beranda portal Custom Vision, tempat proyek Anda kini terdaftar.
4. Di halaman beranda portal Custom Vision, di bagian kanan atas, klik ikon *pengaturan* (&9881;) untuk melihat pengaturan layanan Custom Vision Anda. Lalu, di **Sumber daya**, perluas sumber daya *prediksi* (bukan sumber daya pelatihan) dan salin nilai **Kunci** dan **Titik akhir** tersebut ke sel kode di bawah langkah 5, menggantikan **YOUR_KEY** dan **YOUR_ENDPOINT**.
(!) Cek Masuk
Jika Anda menggunakan sumber daya **Custom Vision**, apakah Anda menggunakan sumber daya **prediksi** (bukan sumber daya pelatihan)?
5. Jalankan sel kode di bawah dengan mengeklik tombol Jalankan Sel &9655; (di bagian kiri atas sel) untuk mengatur variabel ID proyek, kunci dan nilai titik akhir Anda.
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
Sekarang Anda dapat menggunakan kunci dan titik akhir dengan klien Custom Vision untuk tersambung ke model deteksi objek Custom Vision.
Jalankan sel kode berikut, yang menggunakan model Anda untuk mendeteksi setiap item yang dihasilkan pada gambar.
> **Catatan**: Jangan terlalu khawatir dengan detail kode. Detail kode menggunakan SDK Python untuk layanan Custom Vision guna mengirim gambar ke model dan mengambil prediksi untuk objek yang terdeteksi. Masing-masing prediksi terdiri dari nama kelas (*apel*, *pisang*, atau *jeruk*) dan koordinat *kotak pembatas* yang menunjukkan tempat pada gambar objek yang diprediksi telah terdeteksi. Kemudian, kode menggunakan informasi ini untuk menggambar kotak berlabel di sekitar masing-masing objek pada gambar.
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____
###Markdown
物体检测
*物体检测*是计算机视觉的一种形式,在这种形式中,机器学习模型被训练为对图像中的各个物体实例进行分类,并指示一个标记其位置的*边界框*。可以将其视为从*图像分类*(在此阶段模型回答问题“这是什么物体的图像”)到构建解决方案(在此阶段我们可以问模型“这个图像中有什么物体,它们在什么位置?”)的过程。

例如,一家商店也许可以使用物体检测模型来实现自动结帐系统,该系统使用摄像头扫描传送带并能识别特定商品,而无需把每个商品都放在传送带上单独扫描。
Microsoft Azure 中的**自定义视觉**认知服务为创建和发布自定义物体检测模型提供了基于云的解决方案。
创建自定义视觉资源
要使用自定义视觉服务,需要具有可用于训练模型的 Azure 资源,以及可用于发布模型以供应用程序使用的资源。在完成这些任务时,可以使用相同的资源,也可以为每项任务使用不同的资源以单独分配成本(如果两个资源在同一区域中创建)。用于其中一个(或两个)任务的资源可以是常规的**认知服务**资源,也可以是特定的**自定义视觉**资源。请按照以下说明创建一个新的**自定义视觉**资源(你也可以使用现有的资源)。
1. 在新的浏览器标签页中打开 Azure 门户 ([https://portal.azure.com](https://portal.azure.com)),使用与你的 Azure 订阅关联的 Microsoft 帐户进行登录。
2. 选择“**&65291;创建资源**”按钮,搜索“*自定义视觉*”并以如下设置创建**自定义视觉**资源:
- **创建选项**:均可
- **订阅**: *你的 Azure 订阅*
- **资源组**: *选择或创建具有唯一名称的资源组*
- **名称**: *输入一个唯一名称*
- **训练位置**: *选择任何可用区域*
- **训练定价层**:中的机器人 F0
- **预测位置**: *与训练位置保持一致*
- **预测定价层**:中的机器人 F0
> **备注**:如果在你的订阅中已有 F0 自定义视觉服务,此处请选择“**S0**”。
3. 等待资源创建完成。
创建自定义视觉项目
要训练物体检测模型,需要根据训练资源创建自定义视觉项目。为此,需要使用自定义视觉门户。
1. 在新的浏览器选项卡中打开自定义视觉门户 ([https://customvision.ai](https://customvision.ai)),使用与你的 Azure 订阅关联的 Microsoft 帐户进行登录。
2. 新建一个项目,设置如下:
- **名称**:商品检测
- **说明**:针对商品的物体检测。
- **资源**: *你之前创建的自定义视觉资源*
- **项目类型**:物体检测
- **领域**:常规
3. 等待项目创建完毕并在浏览器中打开。
添加图像并进行标记
要训练物体检测模型,需要上传包含你希望模型识别的类的图像,并对这些图像进行标记以指示每个物体实例的边界框。
1.从 https://aka.ms/fruit-objects 下载并提取训练图像。提取的文件夹包含一个水果的图像集合。**备注**:如果你无法访问训练图像,临时的应变方法是转到 https://www.github.com,然后转到 https://aka.ms/fruit-objects。
2. 在自定义视觉门户 [https://customvision.ai](https://customvision.ai) 中,确保你正在处理物体检测项目 _Grocery Detection_。然后选择“**添加图像**”,并上传提取文件夹中的所有图像。

3. 上传图像后,选择第一个图像将其打开。
4. 将鼠标悬停在图像中的任何物体上,直到显示一个自动检测到的区域,如下图所示。然后选择物体,并根据需要调整该区域大小,使其包围所选物体。

也可以简单地围绕该物体进行拖动,创建一个区域。
5. 当该区域包围所选物体时,添加一个具有适当物体类型的新标签(“*苹果*”、“*香蕉*”或“*橙子*”),如下所示:

6. 在图像中选择各个物体并为其添加标签,根据需要调整区域大小并添加新标签。

7. 使用右侧的“**>**”链接转至下一个图像,并为图像中的物体添加标签。然后按照这样的步骤继续处理整个图像集合,为每个苹果、香蕉和橙子添加标签。
8. 标记完最后一个图像后,关闭“**图像细节**”编辑器,并在“**训练图像**”页面上的“**标签**”下选择“**已标记**”以查看所有带有标签的图像:

训练和测试模型
你已为项目中的图像添加标签,现在可以训练模型了。
1. 在自定义视觉项目中,单击“**训练**”以使用带有标签的图像训练物体检测模型。选择“**快速训练**”选项。
2. 等待训练完成(可能需要 10 分钟左右),然后检查*精度*、*召回*率和 *mAP* 性能指标 - 这些指标用于衡量分类模型的预测准确度,且应该都很高。
3. 单击页面右上角的“**快速测试**”,然后在“**图像 URL**”框中输入 `https://aka.ms/apple-orange` 并查看生成的预测结果。然后关闭“**快速测试**”窗口。
发布并使用物体检测模型
现在即可发布已训练的模型并在客户端应用程序中使用它。
1. 单击“**性能**”页面左上角的“**&128504; 发布**”来发布已训练的模型,设置如下:
- **模型名称**:detect-produce
- **预测资源**:*你的自定义视觉**预测**资源*。
(!)签入
是否使用了相同的模型名称“**detect-produce**”?
2. 发布后,单击“**性能**”页面右上角的“*设置*”(&9881;) 图标以查看项目设置。然后在左侧的“**常规**”下复制**项目 ID**。向下滚动并将其粘贴到步骤 5 下的代码单元格中,替换“**YOUR_PROJECT_ID**”。
> (*如果在本练习开始时你没有创建**自定义视觉**资源,而是使用**认知服务**资源,可以在项目设置的右侧复制其密钥和终结点,并将其粘贴至下方的代码单元格中,然后运行它以查看结果。否则请继续完成以下步骤,获取自定义视觉预测资源的密钥和终结点。*)
3. 单击“**项目设置**”页面左上角的“*项目库*”(&128065;) 图标以返回到自定义视觉门户主页,此处现在会列出你的项目。
4. 单击自定义视觉门户主页右上角的“*设置*”(&9881;) 图标,查看自定义视觉服务的设置。然后在“**资源**”下展开预测资源(不是训练资源),并将资源的**密钥**和**终结点**值复制到步骤 5 下面的代码单元格中,分别替换“**YOUR_KEY**”和“**YOUR_ENDPOINT**”。
(!)签入
如果你使用的是**自定义视觉**资源,那你是否使用过**预测**资源(不是训练资源)?
5. 通过单击“运行单元格”&9655; 按钮(位于单元格的左侧)运行下面的代码单元格,将变量设置为你自己的项目 ID、密钥和终结点值。
###Code
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint
model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
###Output
_____no_output_____
###Markdown
现在你可以使用密钥和终结点通过自定义视觉客户端连接到自定义视觉物体检测模型。
运行以下代码单元格,该代码单元格使用你的模型来检测图像中的各个商品。
> **备注**:无需太担心代码的详细信息。它使用适用于自定义视觉服务的 Python SDK 向模型提交图像并检索检测到的物体的预测结果。每条预测结果都由类名(“*苹果*”、“*香蕉*”或“*橙子*”)和指示在图像中检测到预测物体的位置的*边界框*坐标组成。然后该代码会使用这些信息在图像上的每个物体周围绘制一个标记框。
###Code
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
%matplotlib inline
# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape
# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)
print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))
# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
results = predictor.detect_image(project_id, model_name, test_data)
# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
"apple": "lightgreen",
"banana": "yellow",
"orange": "orange"
}
for prediction in results.predictions:
color = 'white' # default for 'other' object tags
if (prediction.probability*100) > 50:
if prediction.tag_name in object_colors:
color = object_colors[prediction.tag_name]
left = prediction.bounding_box.left * test_img_w
top = prediction.bounding_box.top * test_img_h
height = prediction.bounding_box.height * test_img_h
width = prediction.bounding_box.width * test_img_w
points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
draw.line(points, fill=color, width=lineWidth)
plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
###Output
_____no_output_____ |
templates/KmeansFTTPmeans5.ipynb | ###Markdown
***Set parameters***
###Code
__algo__ = "KMEANS" #Name of the Clustering algorithm
__emb__ = "ftt" #Name of the Word Embeddings used (glove, w2v, ftt), MUST set directory below
__sentemb__ = "pmeans5" #Name of Sentence Embedding algorithm used
recnum = 30000 #Number of records to be read from files
k = 350 #Number of Clusters
usesqrt = False #Set value of k to sqrt of recnum, overrides k
randomsample = True #Random Sampling to be True/False for records which are read
embedDir = "../../FTXSentEmbs/" #Directory where embeddings are saved for that selected embedding
modelDir = "../models/" #Directory where models are saved
megadfDir = "../MegaDfs/" #Directory Where Megadf is to be saved
plotDir = "../plots/" #Directory where plots are saved
metadataDir = "../modelMetaData/" #Directory where performance and distribution params are to be stored
dumpDir = "../dump/" #Directory where test outcomes are saved
###Output
_____no_output_____
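###Markdown
The cells below write models, plots, dataframes and metadata into the directories configured above, so it can help to make sure those directories exist before training. A small optional sketch using the same directory variables:
###Code
import os

# Create any missing output directories (no-op if they already exist)
for d in [modelDir, megadfDir, plotDir, metadataDir, dumpDir]:
    os.makedirs(d, exist_ok=True)
###Output
_____no_output_____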
###Markdown
Actual Code imports and time
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import cluster, datasets
from sklearn.metrics import silhouette_score, davies_bouldin_score
import seaborn as sns
import os, subprocess, sys
import datetime, time
import pickle
###Output
_____no_output_____
###Markdown
File Settings
###Code
oldlist = os.listdir(embedDir)
filelist = sorted([embedDir+f for f in oldlist if f[-3:]=="pkl"])
filenum = len(filelist)
smalllist = filelist[:filenum]
print("Length of Smalllist: ", len(smalllist))
###Output
Length of Smalllist: 218
###Markdown
Number of Records
It is recommended to set this in the parameters cell at the top.
###Code
recnum = recnum # keep the value set in the parameters cell at the top; override here if needed
###Output
_____no_output_____
###Markdown
Read all the pandas dataframes
###Code
%%time
megadf = pd.DataFrame()
if randomsample == True:
print("randomsample: ", randomsample)
for f in smalllist:
tempdf = pd.read_pickle(f)
megadf = megadf.append(tempdf, ignore_index = True)
megadf = megadf.sample(recnum, random_state=42)
else:
print("randomsample: ", randomsample)
for f in smalllist:
tempdf = pd.read_pickle(f)
megadf = megadf.append(tempdf, ignore_index = True)
if megadf.shape[0] >= recnum:
megadf = megadf[:recnum]
break
print("megadf.shape: ", megadf.shape)
predata = megadf["embedding"]
data = np.matrix(predata.to_list())
print(data.shape)
###Output
(30000, 1500)
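###Markdown
Appending to megadf inside the loop re-allocates the frame for every file, which slows down as the number of pickles grows. An equivalent optional sketch built on a single pd.concat call (same smalllist, randomsample and recnum variables), shown only as an alternative:
###Code
# Read every pickle once, then concatenate in a single pass
frames = [pd.read_pickle(f) for f in smalllist]
megadf_alt = pd.concat(frames, ignore_index=True)
if randomsample:
    megadf_alt = megadf_alt.sample(recnum, random_state=42)
else:
    megadf_alt = megadf_alt[:recnum]
print("megadf_alt.shape:", megadf_alt.shape)
###Output
_____no_output_____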
###Markdown
Number of Clusters
It is recommended to set this in the parameters cell at the top.
###Code
if usesqrt == True:
print("usesqrt: ", usesqrt)
sqrt_k = int(np.sqrt(data.shape[0]))
k = int(sqrt_k)
else:
print("usesqrt: ", usesqrt)
k = k
print("k: ", k)
###Output
usesqrt: False
k: 50
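###Markdown
Besides the sqrt heuristic, k is often chosen by scanning a few candidate values and comparing inertia and silhouette. A rough optional sketch (slow on the full matrix, so the silhouette is computed on a subsample; the candidate values are just examples):
###Code
# Scan a few candidate cluster counts and report simple quality indicators
for ck in [50, 100, 200, 350]:
    km = cluster.KMeans(n_clusters=ck, n_init=5, random_state=42).fit(data)
    sil = silhouette_score(data, km.labels_, sample_size=5000, random_state=42)
    print("k={} inertia={:.1f} silhouette={:.3f}".format(ck, km.inertia_, sil))
###Output
_____no_output_____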
###Markdown
Clustering
Please modify the functions here to change the algorithm.
###Code
%%time
print("Starting Clustering Process")
start_time = time.time()
model = cluster.KMeans(n_clusters=k, n_init = 20, max_iter=1000, verbose=1, n_jobs=-1)
model.fit(data)
end_time = time.time()
timetrain = round(end_time-start_time, 2)
print("done! {}".format(timetrain))
print("k_means.fit(data) Done!")
###Output
Starting Clustering Process
done! 7.04
k_means.fit(data) Done!
CPU times: user 186 ms, sys: 119 ms, total: 305 ms
Wall time: 7.04 s
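###Markdown
As noted above, only the clustering cell needs to change to try a different algorithm. A sketch (not run here) of swapping in scikit-learn's MiniBatchKMeans, which usually fits much faster on large record counts; the rest of the notebook keeps working as long as the fitted object exposes cluster_centers_ and labels_:
###Code
# Drop-in alternative: MiniBatchKMeans trades a little accuracy for a much faster fit
alt_model = cluster.MiniBatchKMeans(n_clusters=k, batch_size=1024, n_init=20, max_iter=1000, random_state=42)
alt_model.fit(data)
print("inertia:", alt_model.inertia_)
###Output
_____no_output_____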
###Markdown
Saving the output data into vars
###Code
centroids = model.cluster_centers_
labels = model.labels_
megadf["clusterlabel"]=labels
centroidDF = pd.DataFrame(centroids)
###Output
_____no_output_____
###Markdown
Plotting
###Code
plt.figure(figsize=(16,16))
titlestring = "{} with k={} records={} features={} using {}".format(__algo__, k, data.shape[0], data.shape[1], __emb__)
snsplot = sns.countplot("clusterlabel", data=megadf)
snsplot.xaxis.label.set_size(20)
snsplot.yaxis.label.set_size(20)
plt.title(
titlestring,
fontdict = {'fontsize' : 30}
)
###Output
_____no_output_____
###Markdown
*Name given to saved files*
###Code
features = data.shape[1]
records = data.shape[0]
name = "{}_{}_{}_K{}_R{}_F{}".format(__algo__, __emb__, __sentemb__, k, records, features)
name
###Output
_____no_output_____
###Markdown
Saving Data
Save model
###Code
modelname = "{}_model.pkl".format(name)
pickle.dump(model, open(modelDir + modelname, 'wb'))
###Output
_____no_output_____
###Markdown
Save Plot
###Code
snspltname = "{}_plt.png".format(name)
snsplot.figure.savefig(plotDir + snspltname)
###Output
_____no_output_____
###Markdown
Save Megadf
###Code
clusterdfname = "{}_clustered_megadf.pkl".format(name)
megadf.to_pickle(megadfDir + clusterdfname)
###Output
_____no_output_____
###Markdown
Save Centroids
###Code
centroidDF = pd.DataFrame(centroids)
centroidDFname = "{}_centroids.pkl".format(name)
centroidDF.to_pickle(megadfDir + centroidDFname)
print(centroidDF.shape)
###Output
(50, 250)
###Markdown
Open dataframe to test
###Code
sub = megadf.loc[:, :]
sub.tail()
megadf.columns
###Output
_____no_output_____
###Markdown
Performance Testing and Distribution
###Code
metadata = pd.DataFrame(columns=["Name", "Algo", "WordEmb", "SentEmb", "K", "R", "F", "T2T", "SS", "DBS", "CSavg", "CSmin", "CSmax", "T2Pavg", "T2LM", "T2LMP", "MEM"])
metadict = {
"Name":None, #Name of the save file prefix
"Algo":None, #Name of the Clustering algorithm
"WordEmb":None, #Name of the Word Embeddings used (glove, w2v, ftt)
"SentEmb":None, #Name of Sentence Embedding algorithm used
"K":None, "R":None, "F":None, #Number of clusters, records and fetures
"T2T":None, #Time required to train model
"SS":None, #Silhoutte Score
"DBS":None, #Davis Bouldin Score
"CSavg":None, #Average Cluster Size
"CSmin":None, #Minimum Cluster Size
"CSmax":None, #Maximum Cluster Size
"T2Pavg":None, #Average Time To Predict cluster of one record
"T2LM":None, #Average Time to Load Model
"T2LMP":None, #Amortized time to Predict after loading the model
"MEM":None #Memory used by the Model
}
metadict
metadict["Name"]=name
metadict["Algo"]=__algo__
metadict["WordEmb"]=__emb__
metadict["SentEmb"]=__sentemb__
metadict["K"]=k
metadict["R"]=recnum
metadict["F"]=features
metadict
###Output
_____no_output_____
###Markdown
Time to train
###Code
metadict["T2T"]=timetrain
###Output
_____no_output_____
###Markdown
Scores
###Code
ss = silhouette_score(data, labels, metric = 'euclidean')
dbs = davies_bouldin_score(data, labels)
metadict["SS"]=ss
metadict["DBS"]=dbs
###Output
_____no_output_____
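###Markdown
Silhouette score is quadratic in the number of records, so for larger runs it is common to score a random subsample instead of the full matrix. An optional sketch using the sample_size argument of the same scikit-learn function:
###Code
# Approximate silhouette on a 10k-record subsample (much cheaper than the full computation)
ss_sampled = silhouette_score(data, labels, metric='euclidean', sample_size=10000, random_state=42)
print("sampled silhouette:", ss_sampled)
###Output
_____no_output_____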
###Markdown
Cluster Size
###Code
clusterdata = megadf.groupby("clusterlabel", as_index=True).size().reset_index(name="count")
clusterdata.head()
clusterdfname = "{}_clustered_counts.pkl".format(name)
clusterdata.to_pickle(megadfDir + clusterdfname)
countdata = clusterdata.groupby("count").size().reset_index(name="clusters")
display(countdata.head(3))
display(countdata.tail(3))
metadict["CSmax"] = max(clusterdata["count"])
metadict["CSmin"] = min(clusterdata["count"])
metadict["CSavg"] = np.mean(clusterdata["count"])
%matplotlib inline
plt.figure(figsize=(16,16))
sns.axes_style("whitegrid", {"axes.grid":True,
'axes.spines.left': False,
'axes.spines.bottom': False,
'axes.spines.right': False,
'axes.spines.top': False})
titlestring = "{}_Cluster_Distribution".format(name)
snsplot = sns.distplot(clusterdata["count"], kde=False, bins=max(clusterdata["count"]),
hist_kws={'edgecolor':'black'},)
snsplot.set(xlabel="Number of Papers", ylabel="Number of Clusters")
snsplot.xaxis.label.set_size(20)
snsplot.yaxis.label.set_size(20)
plt.title(
titlestring,
fontdict = {'fontsize' : 25}
)
plt.show()
snspltname = "{}_Cluster_Distribution.png".format(name)
snsplot.figure.savefig(plotDir + snspltname)
###Output
_____no_output_____
###Markdown
Prediction Time Performance
###Code
testdf = pd.DataFrame()
if recnum < 2000:
samplenum = int(recnum / 10)
else:
samplenum = 2000
for f in smalllist:
tempdf = pd.read_pickle(f)
testdf = megadf.append(tempdf, ignore_index = True, sort = False)
testdf = testdf.sample(samplenum, random_state=int(time.time()%100000))
predata = testdf["embedding"]
data = np.matrix(predata.to_list())
print(data.shape)
print("Starting Predicting Performance")
testmodel = model
start_time = time.time()
for d in data:
lb = testmodel.predict(d)
end_time = time.time()
timetest = end_time-start_time
avgtime = timetest/data.shape[0]
print("Avgtime: {} Totaltime: {}".format(avgtime, timetest))
metadict["T2Pavg"]=avgtime
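# Note (optional sketch, not included in the timing above): the row-by-row loop measures
# worst-case per-record latency; KMeans can also label the whole matrix in one vectorized call,
# which is typically far faster per record:
# batch_labels = testmodel.predict(data)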
print("Starting Loading Performance")
loadruns = 50
start_time = time.time()
for i in range(loadruns):
testmodel = pickle.load(open(modelDir + modelname, 'rb'))
end_time = time.time()
timetest = end_time-start_time
avgtime = timetest/loadruns
print("Avgtime: {} Totaltime: {}".format(avgtime, timetest))
metadict["T2LM"] = avgtime
avgtime
print("Starting Amortized Performance")
loadruns = 5
avglist = []
for i in range(loadruns):
start_time = time.time()
testmodel = pickle.load(open(modelDir + modelname, 'rb'))
for d in data:
lb = testmodel.predict(d)
end_time = time.time()
timetest = (end_time-start_time)/data.shape[0]
avglist.append(timetest)
timetest = np.sum(avglist)
avgtime = np.mean(avglist)
print("Avgtime: {} Totaltime: {}".format(avgtime, timetest))
metadict["T2LMP"] = avgtime
avgtime
modelsize = sys.getsizeof(pickle.dumps(model))
print("modelsize:", modelsize, "bytes")
metadict["MEM"]=modelsize
metadict
metadata = metadata.append(metadict, ignore_index=True)
metadata
metadataname = "{}_metadata.pkl".format(name)
metadata.to_pickle(metadataDir + metadataname)
###Output
_____no_output_____ |
sagemaker-experiments/mnist-handwritten-digits-classification-experiment/mnist-handwritten-digits-classification-experiment.ipynb | ###Markdown
MNIST Handwritten Digits Classification Experiment
This demo shows how you can use the SageMaker Experiment Management Python SDK to organize, track, compare, and evaluate your machine learning (ML) model training experiments.
You can track artifacts for experiments, including data sets, algorithms, hyper-parameters, and metrics. Experiments executed on SageMaker, such as SageMaker Autopilot jobs and training jobs, will be automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before/after model training, e.g. data pre-processing or post-training model evaluation.
The APIs also let you search and browse your current and past experiments, compare experiments, and identify best performing models.
Now we will demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment will be organized as follows:
1. Download and prepare the MNIST dataset.
2. Train a Convolutional Neural Network (CNN) model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.
3. Finally, use the search and analytics capabilities of the Python SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.
4. We will also see an example of tracing the complete lineage of a model version, i.e. the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.
Make sure you selected the `Python 3 (Data Science)` kernel.
Install Python SDKs
###Code
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.24
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# pytorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.3.0
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats("retina")
sess = boto3.Session()
sm = sess.client("sagemaker")
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Create an S3 bucket to hold data
###Code
# create a s3 bucket to hold data, note that your account might already created a bucket with the same name
account_id = sess.client("sts").get_caller_identity()["Account"]
bucket = "sagemaker-experiments-{}-{}".format(sess.region_name, account_id)
prefix = "mnist"
try:
if sess.region_name == "us-east-1":
sess.client("s3").create_bucket(Bucket=bucket)
else:
sess.client("s3").create_bucket(
Bucket=bucket, CreateBucketConfiguration={"LocationConstraint": sess.region_name}
)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Dataset
We download the MNIST handwritten digits dataset, and then apply a transformation to each image.
###Code
# TODO: can be removed after upgrade to torchvision==0.9.1
# see github.com/pytorch/vision/issues/1938 and github.com/pytorch/vision/issues/3549
datasets.MNIST.urls = [
"https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz",
"https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz",
"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz",
"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz",
]
# download the dataset
# this will not only download data to ./mnist folder, but also load and transform (normalize) them
train_set = datasets.MNIST(
"mnist",
train=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=True,
)
test_set = datasets.MNIST(
"mnist",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=False,
)
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload them to S3.
###Code
inputs = sagemaker.Session().upload_data(path="mnist", bucket=bucket, key_prefix=prefix)
print("input spec: {}".format(inputs))
###Output
_____no_output_____
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters(
{
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
}
)
# we can log the s3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
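###Markdown
The same Tracker API can record any other facts about the pre-processing step. For example, an optional sketch that also logs the raw train/test split sizes as parameters:
###Code
# Optional: record dataset sizes alongside the normalization parameters
with Tracker.create(display_name="Preprocessing-Sizes", sagemaker_boto_client=sm) as size_tracker:
    size_tracker.log_parameters({"train_records": len(train_set), "test_records": len(test_set)})
###Output
_____no_output_____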
###Markdown
Step 1 - Set up the Experiment
Create an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create an experiment named “customer churn prediction”), [2] a data science team that owns the experiment (e.g. create an experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”.
Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm,
)
print(mnist_experiment)
###Output
_____no_output_____
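###Markdown
Experiment names must be unique per account and region; the timestamp suffix above avoids collisions. If you prefer a fixed name when re-running the notebook, a load-or-create sketch (the fixed name here is just an example):
###Code
from smexperiments.experiment import Experiment

fixed_name = "mnist-hand-written-digits-classification"  # hypothetical fixed experiment name
try:
    existing_experiment = Experiment.load(experiment_name=fixed_name, sagemaker_boto_client=sm)
except Exception:
    existing_experiment = Experiment.create(
        experiment_name=fixed_name,
        description="Classification of mnist hand-written digits",
        sagemaker_boto_client=sm,
    )
###Output
_____no_output_____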
###Markdown
Step 2 - Track Experiment
Now create a Trial for each training run to track its inputs, parameters, and metrics.
While training the CNN model on SageMaker, we will experiment with several values for the number of hidden channels in the model. We will create a Trial to track each training job run. We will also create a TrialComponent from the tracker we created before, and add it to the Trial. This will enrich the Trial with the parameters we captured from the data pre-processing stage.
Note that the execution of the following code takes a while.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following training jobs asynchronously, you may need to increase your resource limit. Otherwise, you can run them sequentially.
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
    # associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# all input configurations, parameters, and metrics specified in estimator
# definition are automatically tracked
estimator = PyTorch(
py_version="py3",
entry_point="./mnist.py",
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
instance_count=1,
instance_type="ml.c4.xlarge",
hyperparameters={
"epochs": 2,
"backend": "gloo",
"hidden_channels": num_hidden_channel,
"dropout": 0.2,
"kernel_size": 5,
"optimizer": "sgd",
},
metric_definitions=[
{"Name": "train:loss", "Regex": "Train Loss: (.*?);"},
{"Name": "test:loss", "Regex": "Test Average loss: (.*?),"},
{"Name": "test:accuracy", "Regex": "Test Accuracy: (.*?)%;"},
],
enable_sagemaker_metrics=True,
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Now associate the estimator with the Experiment and Trial
estimator.fit(
inputs={"training": inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# give it a while before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
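###Markdown
The `metric_definitions` passed to the estimator above are regular expressions that SageMaker runs against the training job's log output to extract metric values. As a minimal illustration, the snippet below applies the same patterns to a hypothetical log line (the line is made up here; the real format is whatever `mnist.py` prints).
###Code
import re

# Hypothetical log line in the format the metric regexes expect
sample_log_line = "Test Average loss: 0.0521, Test Accuracy: 97.3%;"
for name, pattern in [
    ("test:loss", r"Test Average loss: (.*?),"),
    ("test:accuracy", r"Test Accuracy: (.*?)%;"),
]:
    match = re.search(pattern, sample_log_line)
    if match:
        print(name, "=", float(match.group(1)))
###Output
_____no_output_____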
###Markdown
Compare the model training runs for an experimentNow we will use the analytics capabilities of the Python SDK to query and compare the training runs to identify the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters": [
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=["test:accuracy"],
parameter_names=["hidden_channels", "epochs", "dropout", "optimizer"],
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
###Markdown
To isolate and measure the impact of the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next, let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial.
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters": [
{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2],
}
]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy an endpoint for the best training job / trial componentNow we'll take the best model (as sorted above) and create an endpoint for it.
###Code
# Pulling best based on sort in the analytics/dataframe so first is best....
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]["TrialComponentName"]
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts["SageMaker.ModelArtifact"].value
env = {
"hidden_channels": str(int(best_trial_component.parameters["hidden_channels"])),
"dropout": str(best_trial_component.parameters["dropout"]),
"kernel_size": str(int(best_trial_component.parameters["kernel_size"])),
}
model = PyTorchModel(
model_data,
role,
"./mnist.py",
py_version="py3",
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
name=best_trial_component.trial_component_name,
)
predictor = model.deploy(instance_type="ml.m5.xlarge", initial_instance_count=1)
###Output
_____no_output_____
###Markdown
CleanupOnce we're done, don't forget to clean up the endpoint to prevent unnecessary billing.> Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If so, comment out the `mnist_experiment.delete_all()` call below.
###Code
predictor.delete_endpoint()
mnist_experiment.delete_all(action="--force")
###Output
_____no_output_____
###Markdown
Run a SageMaker Experiment with MNIST Handwritten Digits ClassificationThis demo shows how you can use the [SageMaker Experiments Python SDK](https://sagemaker-experiments.readthedocs.io/en/latest/) to organize, track, compare, and evaluate your machine learning (ML) model training experiments.You can track artifacts for experiments, including data sets, algorithms, hyperparameters, and metrics. Experiments executed on SageMaker such as SageMaker Autopilot jobs and training jobs are automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before or after model training, such as data pre-processing or post-training model evaluation.The APIs also let you search and browse your current and past experiments, compare experiments, and identify best-performing models.We demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment is organized as follows:1. Download and prepare the MNIST dataset.2. Train a Convolutional Neural Network (CNN) Model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.3. Finally use the search and analytics capabilities of the SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.4. We also show an example of tracing the complete lineage of a model version: the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.Make sure you select the `Python 3 (Data Science)` kernel in Studio, or `conda_pytorch_p36` in a notebook instance. RuntimeThis notebook takes approximately 25 minutes to run. Contents1. [Install modules](Install-modules)1. [Setup](Setup)1. [Download the dataset](Download-the-dataset)1. [Step 1: Set up the Experiment](Step-1:-Set-up-the-Experiment)1. [Step 2: Track Experiment](Step-2:-Track-Experiment)1. [Deploy an endpoint for the best training job / trial component](Deploy-an-endpoint-for-the-best-training-job-/-trial-component)1. [Cleanup](Cleanup)1. [Contact](Contact) Install modules
###Code
import sys
###Output
_____no_output_____
###Markdown
Install the SageMaker Experiments Python SDK
###Code
!{sys.executable} -m pip install sagemaker-experiments==0.1.35
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# PyTorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.2.2
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats("retina")
sm_sess = sagemaker.Session()
sess = sm_sess.boto_session
sm = sm_sess.sagemaker_client
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Download the datasetWe download the MNIST handwritten digits dataset, and then apply a transformation on each image.
###Code
bucket = sm_sess.default_bucket()
prefix = "DEMO-mnist"
print("Using S3 location: s3://" + bucket + "/" + prefix + "/")
datasets.MNIST.urls = [
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-labels-idx1-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-labels-idx1-ubyte.gz",
]
# Download the dataset to the ./mnist folder, and load and transform (normalize) them
train_set = datasets.MNIST(
"mnist",
train=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=True,
)
test_set = datasets.MNIST(
"mnist",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=False,
)
###Output
_____no_output_____
###Markdown
View an example image from the dataset.
###Code
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload it to S3.
###Code
inputs = sagemaker.Session().upload_data(path="mnist", bucket=bucket, key_prefix=prefix)
###Output
_____no_output_____
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters(
{
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
}
)
# We can log the S3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
###Markdown
Step 1: Set up the ExperimentCreate an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”. Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm,
)
print(mnist_experiment)
###Output
_____no_output_____
###Markdown
Step 2: Track Experiment Now create a Trial for each training run to track its inputs, parameters, and metrics.While training the CNN model on SageMaker, we experiment with several values for the number of hidden channels in the model. We create a Trial to track each training job run. We also create a TrialComponent from the tracker we created before, and add it to the Trial. This enriches the Trial with the parameters we captured from the data pre-processing stage.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following five training jobs in parallel, you may need to increase your resource limit. Here we run them sequentially.
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# Create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
    # Associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# All input configurations, parameters, and metrics specified in
# the estimator definition are automatically tracked
estimator = PyTorch(
py_version="py3",
entry_point="./mnist.py",
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
instance_count=1,
instance_type="ml.c4.xlarge",
hyperparameters={
"epochs": 2,
"backend": "gloo",
"hidden_channels": num_hidden_channel,
"dropout": 0.2,
"kernel_size": 5,
"optimizer": "sgd",
},
metric_definitions=[
{"Name": "train:loss", "Regex": "Train Loss: (.*?);"},
{"Name": "test:loss", "Regex": "Test Average loss: (.*?),"},
{"Name": "test:accuracy", "Regex": "Test Accuracy: (.*?)%;"},
],
enable_sagemaker_metrics=True,
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Associate the estimator with the Experiment and Trial
estimator.fit(
inputs={"training": inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# Wait two seconds before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
###Markdown
Compare the model training runs for an experimentNow we use the analytics capabilities of the Experiments SDK to query and compare the training runs for identifying the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters": [
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=["test:accuracy"],
parameter_names=["hidden_channels", "epochs", "dropout", "optimizer"],
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
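###Markdown
For a quick visual comparison instead of a table, you can plot the best test accuracy against the number of hidden channels. This is a sketch that assumes the dataframe exposes a `hidden_channels` parameter column and a max-accuracy metric column (the exact metric column name, e.g. `test:accuracy - Max`, can vary by SDK version, so it is looked up defensively).
###Code
df = trial_component_analytics.dataframe()
# Look for the column holding the max of the test:accuracy metric
accuracy_cols = [c for c in df.columns if c.startswith("test:accuracy") and "Max" in c]
if accuracy_cols and "hidden_channels" in df.columns:
    plot_df = df[["hidden_channels", accuracy_cols[0]]].sort_values("hidden_channels")
    plot_df.plot(x="hidden_channels", y=accuracy_cols[0], marker="o", legend=False)
    plt.xlabel("hidden channels")
    plt.ylabel("max test accuracy (%)")
    plt.show()
else:
    print("Expected columns not found; inspect the columns:", list(df.columns))
###Output
_____no_output_____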
###Markdown
To isolate and measure the impact of the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next, let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial.
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters": [
{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2],
}
]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy an endpoint for the best training job / trial componentNow we take the best model and deploy it to an endpoint so it is available to perform inference.
###Code
# Pulling best based on sort in the analytics/dataframe, so first is best....
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]["TrialComponentName"]
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts["SageMaker.ModelArtifact"].value
env = {
"hidden_channels": str(int(best_trial_component.parameters["hidden_channels"])),
"dropout": str(best_trial_component.parameters["dropout"]),
"kernel_size": str(int(best_trial_component.parameters["kernel_size"])),
}
model = PyTorchModel(
model_data,
role,
"./mnist.py",
py_version="py3",
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
name=best_trial_component.trial_component_name,
)
predictor = model.deploy(instance_type="ml.m5.xlarge", initial_instance_count=1)
###Output
_____no_output_____
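###Markdown
Before cleaning up, you can optionally sanity-check the endpoint with a single image. The exact request format depends on the inference handlers in `mnist.py` (not shown in this notebook), so the sketch below assumes the default PyTorch serving behavior: a NumPy array shaped `(batch, channel, height, width)` goes in and an array of per-class scores comes back.
###Code
# Minimal sketch, assuming the default NumPy in / NumPy out contract
image, label = test_set[0]                                    # transformed image tensor of shape (1, 28, 28)
payload = image.numpy()[np.newaxis, ...].astype(np.float32)   # add a batch dimension -> (1, 1, 28, 28)
response = predictor.predict(payload)
predicted_digit = int(np.argmax(response))
print("predicted:", predicted_digit, "actual:", label)
###Output
_____no_output_____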
###Markdown
CleanupOnce we're done, clean up the endpoint to prevent unnecessary billing.
###Code
predictor.delete_endpoint()
###Output
_____no_output_____
###Markdown
Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If not, delete all experiment artifacts.
###Code
mnist_experiment.delete_all(action="--force")
###Output
_____no_output_____
###Markdown
MNIST Handwritten Digits Classification ExperimentThis demo shows how you can use the SageMaker Experiment Management Python SDK to organize, track, compare, and evaluate your machine learning (ML) model training experiments.You can track artifacts for experiments, including data sets, algorithms, hyperparameters, and metrics. Experiments executed on SageMaker, such as SageMaker Autopilot jobs and training jobs, will be automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before or after model training, e.g. data pre-processing or post-training model evaluation.The APIs also let you search and browse your current and past experiments, compare experiments, and identify best-performing models.Now we will demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment will be organized as follows:1. Download and prepare the MNIST dataset.2. Train a Convolutional Neural Network (CNN) Model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.3. Finally, use the search and analytics capabilities of the Python SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.4. We will also see an example of tracing the complete lineage of a model version, i.e. the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.Make sure you select the `Python 3 (Data Science)` kernel. Install Python SDKs
###Code
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.24
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# pytorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.3.0
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats("retina")
sm_sess = sagemaker.Session()
sess = sm_sess.boto_session
sm = sm_sess.sagemaker_client
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Create an S3 bucket to hold data
###Code
# create an S3 bucket to hold data; note that your account might have already created a bucket with the same name
account_id = sess.client("sts").get_caller_identity()["Account"]
bucket = "sagemaker-experiments-{}-{}".format(sess.region_name, account_id)
prefix = "mnist"
try:
if sess.region_name == "us-east-1":
sess.client("s3").create_bucket(Bucket=bucket)
else:
sess.client("s3").create_bucket(
Bucket=bucket, CreateBucketConfiguration={"LocationConstraint": sess.region_name}
)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
DatasetWe download the MNIST handwritten digits dataset, and then apply a transformation to each image.
###Code
datasets.MNIST.urls = [
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-labels-idx1-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-labels-idx1-ubyte.gz",
]
# download the dataset
# this will not only download data to ./mnist folder, but also load and transform (normalize) them
train_set = datasets.MNIST(
"mnist",
train=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=True,
)
test_set = datasets.MNIST(
"mnist",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=False,
)
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload it to s3.
###Code
inputs = sagemaker.Session().upload_data(path="mnist", bucket=bucket, key_prefix=prefix)
print("input spec: {}".format(inputs))
###Output
_____no_output_____
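###Markdown
As an optional check (assuming your role allows `s3:ListBucket` on this bucket), you can list a few of the uploaded objects to confirm the dataset landed under the expected prefix.
###Code
# List up to five objects under the prefix we just uploaded to
listing = sess.client("s3").list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=5)
for obj in listing.get("Contents", []):
    print(obj["Key"], obj["Size"])
###Output
_____no_output_____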
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters(
{
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
}
)
# we can log the s3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
###Markdown
Step 1 - Set up the ExperimentCreate an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create an experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create an experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”. Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm,
)
print(mnist_experiment)
###Output
_____no_output_____
###Markdown
Step 2 - Track Experiment Now create a Trial for each training run to track its inputs, parameters, and metrics.While training the CNN model on SageMaker, we will experiment with several values for the number of hidden channels in the model. We will create a Trial to track each training job run. We will also create a TrialComponent from the tracker we created before, and add it to the Trial. This will enrich the Trial with the parameters we captured from the data pre-processing stage.Note that the execution of the following code takes a while.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following training jobs asynchronously, you may need to increase your resource limit. Otherwise, you can run them sequentially.
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
    # associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# all input configurations, parameters, and metrics specified in estimator
# definition are automatically tracked
estimator = PyTorch(
py_version="py3",
entry_point="./mnist.py",
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
instance_count=1,
instance_type="ml.c4.xlarge",
hyperparameters={
"epochs": 2,
"backend": "gloo",
"hidden_channels": num_hidden_channel,
"dropout": 0.2,
"kernel_size": 5,
"optimizer": "sgd",
},
metric_definitions=[
{"Name": "train:loss", "Regex": "Train Loss: (.*?);"},
{"Name": "test:loss", "Regex": "Test Average loss: (.*?),"},
{"Name": "test:accuracy", "Regex": "Test Accuracy: (.*?)%;"},
],
enable_sagemaker_metrics=True,
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Now associate the estimator with the Experiment and Trial
estimator.fit(
inputs={"training": inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# give it a while before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
###Markdown
Compare the model training runs for an experimentNow we will use the analytics capabilities of the Python SDK to query and compare the training runs to identify the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters": [
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=["test:accuracy"],
parameter_names=["hidden_channels", "epochs", "dropout", "optimizer"],
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
###Markdown
To isolate and measure the impact of the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next, let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial.
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters": [
{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2],
}
]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy an endpoint for the best training job / trial componentNow we'll take the best model (as sorted above) and create an endpoint for it.
###Code
# Pulling best based on sort in the analytics/dataframe so first is best....
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]["TrialComponentName"]
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts["SageMaker.ModelArtifact"].value
env = {
"hidden_channels": str(int(best_trial_component.parameters["hidden_channels"])),
"dropout": str(best_trial_component.parameters["dropout"]),
"kernel_size": str(int(best_trial_component.parameters["kernel_size"])),
}
model = PyTorchModel(
model_data,
role,
"./mnist.py",
py_version="py3",
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
name=best_trial_component.trial_component_name,
)
predictor = model.deploy(instance_type="ml.m5.xlarge", initial_instance_count=1)
###Output
_____no_output_____
###Markdown
CleanupOnce we're done, don't forget to clean up the endpoint to prevent unnecessary billing.> Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If so, comment out the `mnist_experiment.delete_all()` call below.
###Code
predictor.delete_endpoint()
mnist_experiment.delete_all(action="--force")
###Output
_____no_output_____
###Markdown
MNIST Handwritten Digits Classification ExperimentThis demo shows how you can use the SageMaker Experiment Management Python SDK to organize, track, compare, and evaluate your machine learning (ML) model training experiments.You can track artifacts for experiments, including data sets, algorithms, hyperparameters, and metrics. Experiments executed on SageMaker, such as SageMaker Autopilot jobs and training jobs, will be automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before or after model training, e.g. data pre-processing or post-training model evaluation.The APIs also let you search and browse your current and past experiments, compare experiments, and identify best-performing models.Now we will demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment will be organized as follows:1. Download and prepare the MNIST dataset.2. Train a Convolutional Neural Network (CNN) Model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.3. Finally, use the search and analytics capabilities of the Python SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.4. We will also see an example of tracing the complete lineage of a model version, i.e. the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.Make sure you select the `Python 3 (Data Science)` kernel. Install Python SDKs
###Code
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.24
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# pytorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.3.0
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats('retina')
sess = boto3.Session()
sm = sess.client('sagemaker')
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Create an S3 bucket to hold data
###Code
# create an S3 bucket to hold data; note that your account might have already created a bucket with the same name
account_id = sess.client('sts').get_caller_identity()["Account"]
bucket = 'sagemaker-experiments-{}-{}'.format(sess.region_name, account_id)
prefix = 'mnist'
try:
if sess.region_name == "us-east-1":
sess.client('s3').create_bucket(Bucket=bucket)
else:
sess.client('s3').create_bucket(Bucket=bucket,
CreateBucketConfiguration={'LocationConstraint': sess.region_name})
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
DatasetWe download the MNIST handwritten digits dataset, and then apply a transformation to each image.
###Code
# TODO: can be removed after upgrade to torchvision==0.9.1
# see github.com/pytorch/vision/issues/1938 and github.com/pytorch/vision/issues/3549
datasets.MNIST.urls = [
'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'
]
# download the dataset
# this will not only download data to ./mnist folder, but also load and transform (normalize) them
train_set = datasets.MNIST('mnist', train=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]),
download=True)
test_set = datasets.MNIST('mnist', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]),
download=False)
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload it to s3.
###Code
inputs = sagemaker.Session().upload_data(path='mnist', bucket=bucket, key_prefix=prefix)
print('input spec: {}'.format(inputs))
###Output
_____no_output_____
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters({
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
})
# we can log the s3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
###Markdown
Step 1 - Set up the ExperimentCreate an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create an experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create an experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”. Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm)
print(mnist_experiment)
###Output
_____no_output_____
###Markdown
Step 2 - Track Experiment Now create a Trial for each training run to track its inputs, parameters, and metrics.While training the CNN model on SageMaker, we will experiment with several values for the number of hidden channels in the model. We will create a Trial to track each training job run. We will also create a TrialComponent from the tracker we created before, and add it to the Trial. This will enrich the Trial with the parameters we captured from the data pre-processing stage.Note that the execution of the following code takes a while.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following training jobs asynchronously, you may need to increase your resource limit. Otherwise, you can run them sequentially.
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
    # associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# all input configurations, parameters, and metrics specified in estimator
# definition are automatically tracked
estimator = PyTorch(
py_version='py3',
entry_point='./mnist.py',
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version='1.1.0',
instance_count=1,
instance_type='ml.c4.xlarge',
hyperparameters={
'epochs': 2,
'backend': 'gloo',
'hidden_channels': num_hidden_channel,
'dropout': 0.2,
'kernel_size': 5,
'optimizer': 'sgd'
},
metric_definitions=[
{'Name':'train:loss', 'Regex':'Train Loss: (.*?);'},
{'Name':'test:loss', 'Regex':'Test Average loss: (.*?),'},
{'Name':'test:accuracy', 'Regex':'Test Accuracy: (.*?)%;'}
],
enable_sagemaker_metrics=True
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Now associate the estimator with the Experiment and Trial
estimator.fit(
inputs={'training': inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# give it a while before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
###Markdown
Compare the model training runs for an experimentNow we will use the analytics capabilities of the Python SDK to query and compare the training runs to identify the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters":[
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=['test:accuracy'],
parameter_names=['hidden_channels', 'epochs', 'dropout', 'optimizer']
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
###Markdown
To isolate and measure the impact of the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next, let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial.
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters":[{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2]
}]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy an endpoint for the best training job / trial componentNow we'll take the best model (as sorted above) and create an endpoint for it.
###Code
#Pulling best based on sort in the analytics/dataframe so first is best....
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]['TrialComponentName']
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts['SageMaker.ModelArtifact'].value
env = {'hidden_channels': str(int(best_trial_component.parameters['hidden_channels'])),
'dropout': str(best_trial_component.parameters['dropout']),
'kernel_size': str(int(best_trial_component.parameters['kernel_size']))}
model = PyTorchModel(
model_data,
role,
'./mnist.py',
py_version='py3',
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version='1.1.0',
name=best_trial_component.trial_component_name)
predictor = model.deploy(
instance_type='ml.m5.xlarge',
initial_instance_count=1)
###Output
_____no_output_____
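###Markdown
The `env` dictionary above is handed to the serving container as environment variables, which is why every value is converted to a string. A hypothetical sketch of how the model-loading code in `mnist.py` might read those values back (the actual script is not included in this notebook):
###Code
import os

# Hypothetical: how an inference script could recover the hyperparameters from the environment
hidden_channels = int(os.environ.get("hidden_channels", "10"))
kernel_size = int(os.environ.get("kernel_size", "5"))
dropout = float(os.environ.get("dropout", "0.5"))
print(hidden_channels, kernel_size, dropout)
###Output
_____no_output_____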
###Markdown
CleanupOnce we're done, don't forget to clean up the endpoint to prevent unnecessary billing.> Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If so, comment out the `mnist_experiment.delete_all()` call below.
###Code
predictor.delete_endpoint()
mnist_experiment.delete_all(action='--force')
###Output
_____no_output_____
###Markdown
MNIST Handwritten Digits Classification ExperimentThis demo shows how you can use the SageMaker Experiment Management Python SDK to organize, track, compare, and evaluate your machine learning (ML) model training experiments.You can track artifacts for experiments, including data sets, algorithms, hyperparameters, and metrics. Experiments executed on SageMaker, such as SageMaker Autopilot jobs and training jobs, will be automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before or after model training, e.g. data pre-processing or post-training model evaluation.The APIs also let you search and browse your current and past experiments, compare experiments, and identify best-performing models.Now we will demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment will be organized as follows:1. Download and prepare the MNIST dataset.2. Train a Convolutional Neural Network (CNN) Model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.3. Finally, use the search and analytics capabilities of the Python SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.4. We will also see an example of tracing the complete lineage of a model version, i.e. the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.Make sure you select the `Python 3 (Data Science)` kernel. Install Python SDKs
###Code
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.24
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# pytorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.3.0
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats("retina")
sess = boto3.Session()
sm = sess.client("sagemaker")
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Create an S3 bucket to hold data
###Code
# create an S3 bucket to hold data; note that your account might have already created a bucket with the same name
account_id = sess.client("sts").get_caller_identity()["Account"]
bucket = "sagemaker-experiments-{}-{}".format(sess.region_name, account_id)
prefix = "mnist"
try:
if sess.region_name == "us-east-1":
sess.client("s3").create_bucket(Bucket=bucket)
else:
sess.client("s3").create_bucket(
Bucket=bucket, CreateBucketConfiguration={"LocationConstraint": sess.region_name}
)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
DatasetWe download the MNIST handwritten digits dataset, and then apply a transformation to each image.
###Code
datasets.MNIST.urls = [
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/train-labels-idx1-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-images-idx3-ubyte.gz",
"https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/MNIST/t10k-labels-idx1-ubyte.gz",
]
# download the dataset
# this will not only download data to ./mnist folder, but also load and transform (normalize) them
train_set = datasets.MNIST(
"mnist",
train=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=True,
)
test_set = datasets.MNIST(
"mnist",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
download=False,
)
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload it to s3.
###Code
inputs = sagemaker.Session().upload_data(path="mnist", bucket=bucket, key_prefix=prefix)
print("input spec: {}".format(inputs))
###Output
_____no_output_____
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters(
{
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
}
)
# we can log the s3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
###Markdown
Step 1 - Set up the ExperimentCreate an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create an experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create an experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”. Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm,
)
print(mnist_experiment)
###Output
_____no_output_____
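###Markdown
If you restart the kernel or come back to this work later, you do not have to create a new experiment; the experiments SDK can load an existing one by name. A small sketch (assuming `Experiment.load` is available in your installed `sagemaker-experiments` version):
###Code
# Reload the experiment we just created instead of creating another one
existing_experiment = Experiment.load(
    experiment_name=mnist_experiment.experiment_name,
    sagemaker_boto_client=sm,
)
print(existing_experiment.experiment_name)
###Output
_____no_output_____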
###Markdown
Step 2 - Track Experiment Now create a Trial for each training run to track its inputs, parameters, and metrics.While training the CNN model on SageMaker, we will experiment with several values for the number of hidden channels in the model. We will create a Trial to track each training job run. We will also create a TrialComponent from the tracker we created before, and add it to the Trial. This will enrich the Trial with the parameters we captured from the data pre-processing stage.Note that the execution of the following code takes a while.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following training jobs asynchronously, you may need to increase your resource limit. Otherwise, you can run them sequentially.
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
    # associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# all input configurations, parameters, and metrics specified in estimator
# definition are automatically tracked
estimator = PyTorch(
py_version="py3",
entry_point="./mnist.py",
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
instance_count=1,
instance_type="ml.c4.xlarge",
hyperparameters={
"epochs": 2,
"backend": "gloo",
"hidden_channels": num_hidden_channel,
"dropout": 0.2,
"kernel_size": 5,
"optimizer": "sgd",
},
metric_definitions=[
{"Name": "train:loss", "Regex": "Train Loss: (.*?);"},
{"Name": "test:loss", "Regex": "Test Average loss: (.*?),"},
{"Name": "test:accuracy", "Regex": "Test Accuracy: (.*?)%;"},
],
enable_sagemaker_metrics=True,
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Now associate the estimator with the Experiment and Trial
estimator.fit(
inputs={"training": inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# give it a while before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
###Markdown
Compare the model training runs for an experimentNow we will use the analytics capabilities of the Python SDK to query and compare the training runs to identify the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters": [
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=["test:accuracy"],
parameter_names=["hidden_channels", "epochs", "dropout", "optimizer"],
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
###Markdown
To isolate and measure the impact of the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next, let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial.
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters": [
{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2],
}
]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy an endpoint for the best training job / trial componentNow we'll take the best model (as sorted above) and create an endpoint for it.
###Code
# Pulling best based on sort in the analytics/dataframe so first is best....
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]["TrialComponentName"]
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts["SageMaker.ModelArtifact"].value
env = {
"hidden_channels": str(int(best_trial_component.parameters["hidden_channels"])),
"dropout": str(best_trial_component.parameters["dropout"]),
"kernel_size": str(int(best_trial_component.parameters["kernel_size"])),
}
model = PyTorchModel(
model_data,
role,
"./mnist.py",
py_version="py3",
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version="1.1.0",
name=best_trial_component.trial_component_name,
)
predictor = model.deploy(instance_type="ml.m5.xlarge", initial_instance_count=1)
###Output
_____no_output_____
###Markdown
CleanupOnce we're done, don't forget to clean up the endpoint to prevent unnecessary billing.> Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If so, comment out the `mnist_experiment.delete_all()` call below.
###Code
predictor.delete_endpoint()
mnist_experiment.delete_all(action="--force")
###Output
_____no_output_____
###Markdown
MNIST Handwritten Digits Classification ExperimentThis demo shows how you can use the SageMaker Experiment Management Python SDK to organize, track, compare, and evaluate your machine learning (ML) model training experiments.You can track artifacts for experiments, including data sets, algorithms, hyperparameters, and metrics. Experiments executed on SageMaker, such as SageMaker Autopilot jobs and training jobs, will be automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before or after model training, e.g. data pre-processing or post-training model evaluation.The APIs also let you search and browse your current and past experiments, compare experiments, and identify best-performing models.Now we will demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment will be organized as follows:1. Download and prepare the MNIST dataset.2. Train a Convolutional Neural Network (CNN) Model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.3. Finally, use the search and analytics capabilities of the Python SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.4. We will also see an example of tracing the complete lineage of a model version, i.e. the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.Make sure you select the `Python 3 (Data Science)` kernel. Install Python SDKs
###Code
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.24
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# pytorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.3.0
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats('retina')
sess = boto3.Session()
sm = sess.client('sagemaker')
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Create an S3 bucket to hold data
###Code
# create an S3 bucket to hold data; note that your account might have already created a bucket with the same name
account_id = sess.client('sts').get_caller_identity()["Account"]
bucket = 'sagemaker-experiments-{}-{}'.format(sess.region_name, account_id)
prefix = 'mnist'
try:
if sess.region_name == "us-east-1":
sess.client('s3').create_bucket(Bucket=bucket)
else:
sess.client('s3').create_bucket(Bucket=bucket,
CreateBucketConfiguration={'LocationConstraint': sess.region_name})
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
DatasetWe download the MNIST handwritten digits dataset, and then apply a transformation to each image.
###Code
# download the dataset
# this will not only download data to ./mnist folder, but also load and transform (normalize) them
train_set = datasets.MNIST('mnist', train=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]),
download=True)
test_set = datasets.MNIST('mnist', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]),
download=False)
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload it to s3.
###Code
inputs = sagemaker.Session().upload_data(path='mnist', bucket=bucket, key_prefix=prefix)
print('input spec: {}'.format(inputs))
###Output
_____no_output_____
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters({
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
})
# we can log the s3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
###Markdown
Step 1 - Set up the ExperimentCreate an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for: [1] a business use case you are addressing (e.g. create an experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create an experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”. Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm)
print(mnist_experiment)
###Output
_____no_output_____
###Markdown
Step 2 - Track Experiment Now create a Trial for each training run to track its inputs, parameters, and metrics.While training the CNN model on SageMaker, we will experiment with several values for the number of hidden channels in the model. We will create a Trial to track each training job run. We will also create a TrialComponent from the tracker we created before, and add it to the Trial. This will enrich the Trial with the parameters we captured from the data pre-processing stage.Note that the execution of the following code takes a while.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following training jobs asynchronously, you may need to increase your resource limit. Otherwise, you can run them sequentially.
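(As a sketch: passing `wait=False` to the `estimator.fit(...)` call below would dispatch each training job without blocking; in this notebook we keep `wait=True` and train sequentially.)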
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
# associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# all input configurations, parameters, and metrics specified in estimator
# definition are automatically tracked
estimator = PyTorch(
py_version='py3',
entry_point='./mnist.py',
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version='1.1.0',
instance_count=1,
instance_type='ml.c4.xlarge',
hyperparameters={
'epochs': 2,
'backend': 'gloo',
'hidden_channels': num_hidden_channel,
'dropout': 0.2,
'kernel_size': 5,
'optimizer': 'sgd'
},
metric_definitions=[
{'Name':'train:loss', 'Regex':'Train Loss: (.*?);'},
{'Name':'test:loss', 'Regex':'Test Average loss: (.*?),'},
{'Name':'test:accuracy', 'Regex':'Test Accuracy: (.*?)%;'}
],
enable_sagemaker_metrics=True
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Now associate the estimator with the Experiment and Trial
estimator.fit(
inputs={'training': inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# give it a while before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
###Markdown
Compare the model training runs for an experiment Now we will use the analytics capabilities of the Python SDK to query and compare the training runs, identifying the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters":[
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=['test:accuracy'],
parameter_names=['hidden_channels', 'epochs', 'dropout', 'optimizer']
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
###Markdown
To isolate and measure the impact of changing the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters":[{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2]
}]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy endpoint for the best training-job / trial component Now we'll take the best model (as sorted above) and create an endpoint for it.
###Code
# Pulling the best model based on the sort order of the analytics dataframe, so the first row is the best
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]['TrialComponentName']
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts['SageMaker.ModelArtifact'].value
env = {'hidden_channels': str(int(best_trial_component.parameters['hidden_channels'])),
'dropout': str(best_trial_component.parameters['dropout']),
'kernel_size': str(int(best_trial_component.parameters['kernel_size']))}
model = PyTorchModel(
model_data,
role,
'./mnist.py',
py_version='py3',
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version='1.1.0',
name=best_trial_component.trial_component_name)
predictor = model.deploy(
instance_type='ml.m5.xlarge',
initial_instance_count=1)
###Output
_____no_output_____
###Markdown
Cleanup Once we're done, don't forget to clean up the endpoint to prevent unnecessary billing.> Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If so, comment out tc.delete()
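A minimal sketch of such a manual cleanup (an added illustration, not part of the original notebook), using the same smexperiments classes imported above; adapt before running:
###Code
# Hedged sketch: delete trial components, trials, then the experiment one by one,
# instead of calling delete_all() below; comment out tc.delete() to keep components.
def manual_cleanup(experiment):
    for trial_summary in experiment.list_trials():
        trial = Trial.load(sagemaker_boto_client=sm, trial_name=trial_summary.trial_name)
        for tc_summary in trial.list_trial_components():
            tc = TrialComponent.load(
                sagemaker_boto_client=sm,
                trial_component_name=tc_summary.trial_component_name)
            trial.remove_trial_component(tc)
            tc.delete()  # comment out to keep trial components
        trial.delete()
    experiment.delete()
###Output
_____no_output_____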
###Code
predictor.delete_endpoint()
mnist_experiment.delete_all(action='--force')
###Output
_____no_output_____
###Markdown
MNIST Handwritten Digits Classification Experiment This demo shows how you can use the SageMaker Experiment Management Python SDK to organize, track, compare, and evaluate your machine learning (ML) model training experiments.You can track artifacts for experiments, including data sets, algorithms, hyper-parameters, and metrics. Experiments executed on SageMaker such as SageMaker Autopilot jobs and training jobs will be automatically tracked. You can also track artifacts for additional steps within an ML workflow that come before/after model training, e.g. data pre-processing or post-training model evaluation.The APIs also let you search and browse your current and past experiments, compare experiments, and identify best performing models.Now we will demonstrate these capabilities through an MNIST handwritten digits classification example. The experiment will be organized as follows:1. Download and prepare the MNIST dataset.2. Train a Convolutional Neural Network (CNN) model. Tune the hyperparameter that configures the number of hidden channels in the model. Track the parameter configurations and resulting model accuracy using the SageMaker Experiments Python SDK.3. Finally, use the search and analytics capabilities of the Python SDK to search, compare and evaluate the performance of all model versions generated from model tuning in Step 2.4. We will also see an example of tracing the complete lineage of a model version, i.e. the collection of all the data pre-processing and training configurations and inputs that went into creating that model version.Make sure you selected the `Python 3 (Data Science)` kernel. Install Python SDKs
###Code
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.24
###Output
_____no_output_____
###Markdown
Install PyTorch
###Code
# pytorch version needs to be the same in both the notebook instance and the training job container
# https://github.com/pytorch/pytorch/issues/25214
!{sys.executable} -m pip install torch==1.1.0
!{sys.executable} -m pip install torchvision==0.3.0
!{sys.executable} -m pip install pillow==6.2.2
!{sys.executable} -m pip install --upgrade sagemaker
###Output
_____no_output_____
###Markdown
Setup
###Code
import time
import boto3
import numpy as np
import pandas as pd
from IPython.display import set_matplotlib_formats
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
set_matplotlib_formats('retina')
sess = boto3.Session()
sm = sess.client('sagemaker')
role = get_execution_role()
###Output
_____no_output_____
###Markdown
Create an S3 bucket to hold the data
###Code
# create an S3 bucket to hold data; note that your account might have already created a bucket with the same name
account_id = sess.client('sts').get_caller_identity()["Account"]
bucket = 'sagemaker-experiments-{}-{}'.format(sess.region_name, account_id)
prefix = 'mnist'
try:
if sess.region_name == "us-east-1":
sess.client('s3').create_bucket(Bucket=bucket)
else:
sess.client('s3').create_bucket(Bucket=bucket,
CreateBucketConfiguration={'LocationConstraint': sess.region_name})
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Dataset We download the MNIST handwritten digits dataset, and then apply a transformation to each image.
###Code
# TODO: can be removed after upgrade to torchvision==0.9.1
# see github.com/pytorch/vision/issues/1938 and github.com/pytorch/vision/issues/3549
datasets.MNIST.urls = [
'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'
]
# download the dataset
# this will not only download data to ./mnist folder, but also load and transform (normalize) them
train_set = datasets.MNIST('mnist', train=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]),
download=True)
test_set = datasets.MNIST('mnist', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]),
download=False)
plt.imshow(train_set.data[2].numpy())
###Output
_____no_output_____
###Markdown
After transforming the images in the dataset, we upload them to S3.
###Code
inputs = sagemaker.Session().upload_data(path='mnist', bucket=bucket, key_prefix=prefix)
print('input spec: {}'.format(inputs))
###Output
_____no_output_____
###Markdown
Now let's track the parameters from the data pre-processing step.
###Code
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
tracker.log_parameters({
"normalization_mean": 0.1307,
"normalization_std": 0.3081,
})
# we can log the s3 uri to the dataset we just uploaded
tracker.log_input(name="mnist-dataset", media_type="s3/uri", value=inputs)
###Output
_____no_output_____
###Markdown
Step 1 - Set up the ExperimentCreate an experiment to track all the model training iterations. Experiments are a great way to organize your data science work. You can create experiments to organize all your model development work for : [1] a business use case you are addressing (e.g. create experiment named “customer churn prediction”), or [2] a data science team that owns the experiment (e.g. create experiment named “marketing analytics experiment”), or [3] a specific data science and ML project. Think of it as a “folder” for organizing your “files”. Create an Experiment
###Code
mnist_experiment = Experiment.create(
experiment_name=f"mnist-hand-written-digits-classification-{int(time.time())}",
description="Classification of mnist hand-written digits",
sagemaker_boto_client=sm)
print(mnist_experiment)
###Output
_____no_output_____
###Markdown
Step 2 - Track Experiment Now create a Trial for each training run to track its inputs, parameters, and metrics.While training the CNN model on SageMaker, we will experiment with several values for the number of hidden channels in the model. We will create a Trial to track each training job run. We will also create a TrialComponent from the tracker we created before, and add it to the Trial. This will enrich the Trial with the parameters we captured from the data pre-processing stage.Note that the execution of the following code takes a while.
###Code
from sagemaker.pytorch import PyTorch, PyTorchModel
hidden_channel_trial_name_map = {}
###Output
_____no_output_____
###Markdown
If you want to run the following training jobs asynchronously, you may need to increase your resource limit. Otherwise, you can run them sequentially.
###Code
preprocessing_trial_component = tracker.trial_component
for i, num_hidden_channel in enumerate([2, 5, 10, 20, 32]):
# create trial
trial_name = f"cnn-training-job-{num_hidden_channel}-hidden-channels-{int(time.time())}"
cnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mnist_experiment.experiment_name,
sagemaker_boto_client=sm,
)
hidden_channel_trial_name_map[num_hidden_channel] = trial_name
# associate the preprocessing trial component with the current trial
cnn_trial.add_trial_component(preprocessing_trial_component)
# all input configurations, parameters, and metrics specified in estimator
# definition are automatically tracked
estimator = PyTorch(
py_version='py3',
entry_point='./mnist.py',
role=role,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version='1.1.0',
instance_count=1,
instance_type='ml.c4.xlarge',
hyperparameters={
'epochs': 2,
'backend': 'gloo',
'hidden_channels': num_hidden_channel,
'dropout': 0.2,
'kernel_size': 5,
'optimizer': 'sgd'
},
metric_definitions=[
{'Name':'train:loss', 'Regex':'Train Loss: (.*?);'},
{'Name':'test:loss', 'Regex':'Test Average loss: (.*?),'},
{'Name':'test:accuracy', 'Regex':'Test Accuracy: (.*?)%;'}
],
enable_sagemaker_metrics=True
)
cnn_training_job_name = "cnn-training-job-{}".format(int(time.time()))
# Now associate the estimator with the Experiment and Trial
estimator.fit(
inputs={'training': inputs},
job_name=cnn_training_job_name,
experiment_config={
"TrialName": cnn_trial.trial_name,
"TrialComponentDisplayName": "Training",
},
wait=True,
)
# give it a while before dispatching the next training job
time.sleep(2)
###Output
_____no_output_____
###Markdown
Compare the model training runs for an experiment Now we will use the analytics capabilities of the Python SDK to query and compare the training runs, identifying the best model produced by our experiment. You can retrieve trial components by using a search expression. Some Simple Analyses
###Code
search_expression = {
"Filters":[
{
"Name": "DisplayName",
"Operator": "Equals",
"Value": "Training",
}
],
}
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
experiment_name=mnist_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.test:accuracy.max",
sort_order="Descending",
metric_names=['test:accuracy'],
parameter_names=['hidden_channels', 'epochs', 'dropout', 'optimizer']
)
trial_component_analytics.dataframe()
###Output
_____no_output_____
###Markdown
To isolate and measure the impact of changing the number of hidden channels on model accuracy, we vary the number of hidden channels and fix the values of the other hyperparameters.Next let's look at an example of tracing the lineage of a model by accessing the data tracked by SageMaker Experiments for the `cnn-training-job-2-hidden-channels` trial
###Code
lineage_table = ExperimentAnalytics(
sagemaker_session=Session(sess, sm),
search_expression={
"Filters":[{
"Name": "Parents.TrialName",
"Operator": "Equals",
"Value": hidden_channel_trial_name_map[2]
}]
},
sort_by="CreationTime",
sort_order="Ascending",
)
lineage_table.dataframe()
###Output
_____no_output_____
###Markdown
Deploy endpoint for the best training-job / trial component Now we'll take the best model (as sorted above) and create an endpoint for it.
###Code
# Pulling the best model based on the sort order of the analytics dataframe, so the first row is the best
best_trial_component_name = trial_component_analytics.dataframe().iloc[0]['TrialComponentName']
best_trial_component = TrialComponent.load(best_trial_component_name)
model_data = best_trial_component.output_artifacts['SageMaker.ModelArtifact'].value
env = {'hidden_channels': str(int(best_trial_component.parameters['hidden_channels'])),
'dropout': str(best_trial_component.parameters['dropout']),
'kernel_size': str(int(best_trial_component.parameters['kernel_size']))}
model = PyTorchModel(
model_data,
role,
'./mnist.py',
py_version='py3',
env=env,
sagemaker_session=sagemaker.Session(sagemaker_client=sm),
framework_version='1.1.0',
name=best_trial_component.trial_component_name)
predictor = model.deploy(
instance_type='ml.m5.xlarge',
initial_instance_count=1)
###Output
_____no_output_____
###Markdown
Cleanup Once we're done, don't forget to clean up the endpoint to prevent unnecessary billing.> Trial components can exist independently of trials and experiments. You might want to keep them if you plan on further exploration. If so, comment out tc.delete()
###Code
predictor.delete_endpoint()
mnist_experiment.delete_all(action='--force')
###Output
_____no_output_____ |
Data_Vizualization/Pie-Charts-Box-Plots-Scatter-Plots-and-Bubble-Plots.ipynb | ###Markdown
Pie Charts, Box Plots, Scatter Plots, and Bubble PlotsMateus Victor GitHub: mateusvictor Setup and Prepping Data
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('ggplot') # optional: for ggplot-like style
# check for latest version of Matplotlib
print('Matplotlib version: ', mpl.__version__) # >= 2.0.0
# Download and convert the excel to a pandas dataframe
df_can = pd.read_excel('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx',
sheet_name='Canada by Citizenship',
skiprows=range(20),
skipfooter=2
)
df_can.head()
# Remove unnecessary columns
df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)
# Rename the columns
df_can.rename(columns={'OdName':'Country' ,'AreaName':'Continent', 'RegName': 'Region'}, inplace=True)
# Convert the name of the columns to strings
df_can.columns = list(map(str, df_can.columns))
# Set the column 'Countries' as index
df_can.set_index('Country', inplace=True)
# Add a total column to the data set
df_can['Total'] = df_can.sum(axis=1)
# List of years to use soon
years = list(map(str, range(1980, 2014)))
# Data Cleaned!
df_can.head()
###Output
_____no_output_____
###Markdown
Pie Charts A `pie chart` is a circular graphic that displays numeric proportions by dividing a circle (or pie) into proportional slices. You are most likely already familiar with pie charts as they are widely used in business and media. We can create pie charts in Matplotlib by passing in the `kind='pie'` keyword. **Step 1: Gather Data**
###Code
# First let's group the countries by continent and apply the sum() function
df_continents = df_can.groupby('Continent', axis=0).sum()
# note: the type of a group by method is a groupby object so to use it, we have to apply a function (.sum())
df_continents
###Output
_____no_output_____
###Markdown
**Step 2: Plot**- `autopct` - is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be `fmt%pct`.- `startangle` - rotates the start of the pie chart by angle degrees counterclockwise from the x-axis.- `shadow` - Draws a shadow beneath the pie (to give a 3D feel).
###Code
# autopct create %, start angle represent starting point
df_continents['Total'].plot(kind='pie',
figsize=(5, 6),
autopct='%1.1f%%', # add in percentages
startangle=90,
shadow=True,
)
plt.title('Immigration to Canada by Continent [1980 - 2013]')
plt.axis('equal')
plt.show()
###Output
_____no_output_____
###Markdown
To make the above visual clearer, let's make some modifications: * Remove the text labels on the pie chart and pass in a plt.legend() instead * Pass in a more attractive set of colors * Explode the slices to emphasize the lowest three continents
###Code
colors_list = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'lightgreen', 'pink']
explode_list = [0.1, 0, 0, 0, 0.1, 0.1] # ratio for each continent with which to offset each wedge.
df_continents['Total'].plot(kind='pie',
figsize=(15, 6),
autopct='%1.1f%%',
startangle=90,
shadow=True,
labels=None, # turn off labels on pie chart
pctdistance=1.12, # the ratio between the center of each pie slice and the start of the text generated by autopct
colors=colors_list, # add custom colors
explode=explode_list # 'explode' lowest 3 continents
)
# scale the title up by 12% to match pctdistance
plt.title('Immigration to Canada by Continent [1980 - 2013]', y=1.12)
plt.axis('equal')
# add legend
plt.legend(labels=df_continents.index, loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Box Plots A `box plot` is a way of statistically representing the _distribution_ of the data through five main dimensions: - **Minimum:** Smallest number in the dataset.- **First quartile:** Middle number between the `minimum` and the `median`.- **Second quartile (Median):** Middle number of the (sorted) dataset.- **Third quartile:** Middle number between `median` and `maximum`.- **Maximum:** Highest number in the dataset.
###Code
# Let's plot the box plot for the Japanese immigrants between 1980 and 2013
# get the data as a dataframe (transposed so that the years become rows)
df_japan = df_can.loc[['Japan'], years].transpose()
df_japan.head()
df_japan.plot(kind='box', figsize=(8, 6))
plt.title('Box plot of Japanese Immigrants from 1980 - 2013')
plt.ylabel('Number of Immigrants')
plt.show()
df_japan.describe()
###Output
_____no_output_____
###Markdown
Plotting multiple box plots: Let's create a box plot to visualize the distribution of the top 15 countries (based on total immigration) grouped by the _decades_ `1980s`, `1990s`, and `2000s`.
###Code
# First we have to get the data
df_top15 = df_can.sort_values('Total', ascending=False).head(15)
# Creating lists with the decade ranges to help us
years80s = list(map(str, range(1980, 1990)))
years90s = list(map(str, range(1990, 2000)))
years00s = list(map(str, range(2000, 2010)))
# Creating the data frames
df_80s = df_top15.loc[:,years80s].sum(axis=1)
df_90s = df_top15.loc[:,years90s].sum(axis=1)
df_00s = df_top15.loc[:,years00s].sum(axis=1)
# Convert to one DataFrame
df_decades= pd.DataFrame({'1980s': df_80s, '1990s':df_90s, '2000': df_00s})
df_decades.head()
# Plotting
df_decades.plot(kind='box',
figsize=(10, 8))
plt.title('Immigrations from the top 15 countries per decade')
plt.show()
###Output
_____no_output_____
###Markdown
Note how the box plot differs from the summary table created. The box plot scans the data and identifies the outliers. In order to be an outlier, the data value must be:- larger than Q3 by at least 1.5 times the interquartile range (IQR), or,- smaller than Q1 by at least 1.5 times the IQR.Let's look at decade 2000s as an example: - Q1 (25%) = 36,101.5 - Q3 (75%) = 105,505.5 - IQR = Q3 - Q1 = 69,404 Using the definition of outlier, any value that is greater than Q3 by 1.5 times IQR will be flagged as outlier.Outlier > 105,505.5 + (1.5 * 69,404) Outlier > 209,611.5 Scatter plot A `scatter plot` (2D) is a useful method of comparing variables against each other. `Scatter` plots look similar to `line plots` in that they both map independent and dependent variables on a 2D graph. While the datapoints are connected together by a line in a line plot, they are not connected in a scatter plot. The data in a scatter plot is considered to express a trend. With further analysis using tools like regression, we can mathematically calculate this relationship and use it to predict trends outside the dataset.Let's see the relationship between the years and the total
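As a quick check of that rule (a sketch added for illustration, not part of the original notebook), we can compute the 1.5 * IQR fences for the 2000s decade directly from the `df_decades` frame built above:
###Code
# outlier fences for the 2000s decade: values beyond Q3 + 1.5*IQR or Q1 - 1.5*IQR
q1 = df_decades['2000'].quantile(0.25)
q3 = df_decades['2000'].quantile(0.75)
iqr = q3 - q1
print('outlier if value >', q3 + 1.5 * iqr, 'or value <', q1 - 1.5 * iqr)
###Output
_____no_output_____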
###Code
# Get the data
# Use sum() to get the total population per year
df_tot = pd.DataFrame(df_can[years].sum(axis=0))
# Change the years to int, useful soon
df_tot.index = list(map(int, df_tot.index))
# Reset the index making the years a column
df_tot.reset_index(inplace=True)
# Renaming the columns
df_tot.columns = ['years', 'total']
df_tot.head()
# Plotting
df_tot.plot(kind='scatter',
x = 'years',
y = 'total',
figsize = (10, 6),
color = 'darkblue')
plt.title('Immigrations to Canada from 1980 to 2013')
plt.xlabel('Years')
plt.ylabel('Total')
plt.show()
###Output
_____no_output_____
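###Markdown
As a minimal sketch of the regression idea mentioned above (an added illustration, not part of the original notebook), we can fit a straight line to the yearly totals with NumPy and extrapolate it beyond the dataset:
###Code
# fit total = m * year + b, then predict a year outside the dataset
m, b = np.polyfit(df_tot['years'], df_tot['total'], 1)
print('predicted total for 2015:', int(m * 2015 + b))
###Output
_____no_output_____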
###Markdown
Bubble Plots A `bubble plot` is a variation of the `scatter plot` that displays three dimensions of data (x, y, z). The datapoints are replaced with bubbles, and the size of each bubble is determined by the third variable 'z', also known as the weight. In `matplotlib`, we can pass an array or scalar containing the weight of each point to the keyword `s` of `plot()`.**Let's start by analyzing the effect of Argentina's great depression**.Argentina suffered a great depression from 1998 - 2002, which caused widespread unemployment, riots, the fall of the government, and a default on the country's foreign debt. In terms of income, over 50% of Argentines were poor, and seven out of ten Argentine children were poor at the depth of the crisis in 2002.
###Code
# Get the data
df_can_t = df_can[years].transpose()
# Cast the index to type int
df_can_t.index = map(int, df_can_t.index)
# Label the index, future column name
df_can_t.index.name = 'Year'
# Reset the index to bring the Year in as column
df_can_t.reset_index(inplace = True)
df_can_t.head()
# Normalize the Brazil and Argentina data using feature scaling
norm_brazil = (df_can_t['Brazil'] - df_can_t['Brazil'].min()) / (df_can_t['Brazil'].max() - df_can_t['Brazil'].min())
norm_argentina = (df_can_t['Argentina'] - df_can_t['Argentina'].min()) / (df_can_t['Argentina'].max() - df_can_t['Argentina'].min())
# Plot two scatter plots in one figure using axes
#Brazil
ax0 = df_can_t.plot(kind='scatter',
x='Year',
y='Brazil',
figsize = (14, 8),
alpha=0.5,
color='green',
s=norm_brazil * 2000 + 10,
xlim=(1975, 2015)
)
# Argentina
ax1 = df_can_t.plot(kind='scatter',
x='Year',
y='Argentina',
alpha=0.5,
color="blue",
s=norm_argentina * 2000 + 10,
ax = ax0
)
ax0.set_ylabel('Number of Immigrants')
ax0.set_title('Immigration from Brazil and Argentina from 1980 - 2013')
ax0.legend(['Brazil', 'Argentina'], loc='upper left', fontsize='x-large')
###Output
_____no_output_____ |
Set_operations.ipynb | ###Markdown
Set operations Creating a set
###Code
a = {1, 2, 3, 4}
print(a)
print(type(a))
b = set([3, 4, 5, 6])
print(b)
print(type(b))
###Output
{3, 4, 5, 6}
<class 'set'>
###Markdown
Accessing set elements
###Code
# Iterating over the set in a loop
for element in a:
print(element, end=' ')
# Checking whether an element is in the set
print(4, a, 4 in a)
print(5, a, 5 in a)
# Checking membership with a set as the operand (this tests element membership, not subset)
{1,2} in a
a
###Output
_____no_output_____
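###Markdown
Note that `{1, 2} in a` asks whether the set `{1, 2}` is itself an element of `a` (and raises a `TypeError`, because sets are unhashable). To test for a subset, use `<=` or `issubset`, as sketched below (an added illustration, not part of the original notebook).
###Code
# Subset checks: is every element of {1, 2} contained in a?
print({1, 2} <= a)
print({1, 2}.issubset(a))
###Output
_____no_output_____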
###Markdown
Adding elements to a set
###Code
# Adding a single element
a.add(7)
print(a)
# Adding several elements
a.update([8,9])
print(a)
###Output
{1, 2, 3, 4, 8, 9}
###Markdown
Removing elements from a set
###Code
# Removing a single element (raises an error if the element is not in the set)
a.remove(9)
print(a)
# Removing a single element (raises an error if the element is not in the set)
a.remove(9)
print(a)
# Removing a single element (does not raise an error if the element is missing)
a.discard(8)
print(a)
# Removing a single element (does not raise an error if the element is missing)
a.discard(8)
print(a)
c = a.copy()
print(c)
# Removing all elements
c.clear()
print(c)
###Output
{1, 2, 3, 4, 7}
set()
###Markdown
Union of sets
###Code
a | b
a.union(b)
# There can be more than two sets
a.union(b, [8,9], {10,11})
###Output
_____no_output_____
###Markdown
Intersection of sets
###Code
a & b
a.intersection(b)
###Output
_____no_output_____
###Markdown
Difference of sets
###Code
a - b
a.difference(b)
b - a
b.difference(a)
###Output
_____no_output_____
###Markdown
Symmetric difference (exclusive OR: all elements remain except those common to both sets)
###Code
a^b
a.symmetric_difference(b)
a = {1, 2, 3}
c = a.update([4,5,6])
print(c)
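# Note: set.update() mutates the set in place and returns None, so print(c) prints None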
a = {1, 2, 3}
b = {4, 5, 6}
print(a)
a.update(b)
###Output
_____no_output_____ |
notebooks/archive/SCRIPTS/models/lsa/lsa_topic_model.ipynb | ###Markdown
sudo apt-get install default-jdk
sudo apt-get install ant
git clone [email protected]:mimno/Mallet.git
cd Mallet/
ant
###Code
Process:
1. Download metadata
2. Download text documents
3. Perform cleaning
4. Apply LDA model
###Output
_____no_output_____
###Markdown
import os
import json
import pandas as pd
###Code
## Import the DocsManager notebook
Let's import the DocsManager helper class that manages the loading and filtering of documents from the API.
###Output
_____no_output_____
###Markdown
%%capture LdaMallet Dictionary (gensim) build_docs transform_dt get_tw get_top_words %run ./LDAModule.ipynb DocsManager build_docs%run ../../DocsManager.ipynb Jupyter.notebook.save_checkpoint() get_corpus_path get_txt_clean_path%run ../../path_manager.ipynb get_corpus_path('IMF')
###Code
Let's create a DocsManager instance.
- `metadata_filename`: path to the metadata file generated after scraping the API
- `cleaned_files_dir`: path to the directory where the cleaned files are stored
- `model_output_dir`: path to where model related files will be saved
###Output
_____no_output_____
###Markdown
CORPUS_ID = 'IMF'
CORPUS_PART = 'ALL'
NUM_TOPICS = 50

MALLET_BINARY_PATH = "../Mallet/bin/mallet"
MODELS_PATH = get_models_path('LSA')
NUM_WORKERS = 22
MODEL_ID = f"{CORPUS_PART}_{NUM_TOPICS}"
MODEL_FOLDER = os.path.join(MODELS_PATH, f'{CORPUS_ID}-{MODEL_ID}')

MODEL_DATA_FOLDER = os.path.join(MODEL_FOLDER, 'data')
if not os.path.isdir(MODEL_DATA_FOLDER):
    os.makedirs(MODEL_DATA_FOLDER)

%%time
docs = build_docs(
    metadata_filename=os.path.join(get_corpus_path(CORPUS_ID), f'{CORPUS_ID.lower()}_metadata_complete.csv'),
    cleaned_files_dir=get_txt_clean_path(CORPUS_ID),
    model_output_dir=MODEL_FOLDER  # Use flat directory as discussed...
)
###Code
Given a `CORPUS_PART`, let's extract the filtered documents. Please check the `DocsManager.ipynb` notebook for additional filter options.
###Output
_____no_output_____
###Markdown
docs.set_min_token_count(100) %%timedocs_filtered, meta = docs.filter_doclist(CORPUS_PART, save=True, return_meta=True, pool_workers=22) meta.head(2) docs_filtered.head(2)
###Code
# LSA model
### Generate gensim dictionary
###Output
_____no_output_____
###Markdown
%%timeg_dict = Dictionary(docs_filtered.text.str.split())g_dict.id2token = {id: token for token, id in g_dict.token2id.items()}
###Code
### Train LDA model using Gensim's Mallet wrapper
###Output
_____no_output_____
###Markdown
corpus = [g_dict.doc2bow(text.split()) for text in docs_filtered.text] MODEL_DATA_FOLDER = os.path.join(MODELS_PATH, f'{CORPUS_ID}-{MODEL_ID}', 'data')if not os.path.isdir(MODEL_DATA_FOLDER): os.makedirs(MODEL_DATA_FOLDER) MODEL_DATA_FOLDER
###Code
# WARNING! Mallet files will be stored in the user home directory.
Ideally, this should be in the /tmp directory but the allocated space is not enough
###Output
_____no_output_____
###Markdown
import logginglogging.basicConfig(filename=f'{CORPUS_ID.lower()}-{MODEL_ID}.log', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO) %%timemodel = LdaMallet( MALLET_BINARY_PATH, corpus=corpus, num_topics=NUM_TOPICS, prefix='./{CORPUS_ID}-{MODEL_ID}_', id2word=g_dict.id2token, workers=NUM_WORKERS) model.fdoctopics(), model.num_topics
###Code
### Load doc topics
###Output
_____no_output_____
###Markdown
dt = pd.read_csv( model.fdoctopics(), delimiter='\t', header=None, names=[i for i in range(model.num_topics)], index_col=None, usecols=[i + 2 for i in range(model.num_topics)],)dt.index = docs_filtered['id']dt = dt.divide(dt.min(axis=1), axis=0).astype(int) - 1 dt.head(2) dt.head(2)
###Code
### Generate dfr data
###Output
_____no_output_____
###Markdown
ddt = transform_dt(dt.as_matrix().T) ttw = get_tw(model)
###Code
### Store data
###Output
_____no_output_____
###Markdown
with open(os.path.join(MODEL_DATA_FOLDER, 'tw.json'), 'w') as fl: json.dump(ttw, fl) with open(os.path.join(MODEL_DATA_FOLDER, 'dt.json'), 'w') as fl: json.dump(ddt, fl) info_json = { "title": "Topics in WB Documents and Reports API", "meta_info": "This site is the working demo for dfr-browser, a browsing interface for topic models of journal articles or other text.", "VIS": { "condition": { "type": "time", "spec": { "unit": "year", "n": 1 } }, "bib_sort": { "major": "year", "minor": "alpha" }, "model_view": { "plot": { "words": 6, "size_range": [6, 14] } } }}with open(os.path.join(MODEL_DATA_FOLDER, 'info.json'), 'w') as fl: json.dump(info_json, fl)
###Code
# Generation of key LDA files
### doc_topics
###Output
_____no_output_____
###Markdown
dt.to_csv( os.path.join(MODEL_DATA_FOLDER, f'doc_topics_{MODEL_ID}.csv'), header=False, Change to True if topic id should be present as the header index=False Change to True if the uid should be present as the index)
###Code
### topic_words
###Output
_____no_output_____
###Markdown
word_topics = pd.DataFrame(model.word_topics, columns=range(model.word_topics.shape[1]), index=range(1, model.word_topics.shape[0] + 1))word_topics = word_topics.rename(columns=model.id2word) word_topics.head() word_topics.astype(int).to_csv( os.path.join(MODEL_DATA_FOLDER, f'topic_words_{MODEL_ID}.csv'), header=False, Change to True if actual word should be present as the header index=False Change to True if the topic id should be present as the index)
###Code
### top_words
###Output
_____no_output_____
###Markdown
top_words = get_top_words(word_topics, topic=None, topn=50) top_words.head(2) top_words.to_csv( os.path.join(MODEL_DATA_FOLDER, f'top_words_{MODEL_ID}.csv'), index=False Change to True if the topic id should be present as the index) %%timemodel.save(os.path.join(MODEL_DATA_FOLDER, f'{CORPUS_ID}_lda_model_{MODEL_ID}.lda')) ls -lh saved_lda_model.lda
###Code
# Find closest document by Euclidean distance
Use functions defined in `LDAModule.ipynb`: `close_docs`
###Output
_____no_output_____
###Markdown
We generate a function that will find and list the N documents closest to a selected one close_docs <- function(docid, numclose) { indx <- which(s$uid == docid) mxcol = 24 + as.numeric(model) x1 <- s[indx, 25:mxcol] neighbors <- s[, 25:mxcol] dist <- pdist(neighbors, x1) similar <- cbind(s, dist@dist) similar <- similar[order(dist@dist),] head(similar[, c(1,5,6,8,9,11,15)], numclose) The first in the list is the document itself } close_docs(10575832, 21) close_docs(27761347, 21) doc_ids = close_docs(docs, doc_id=20140580, num_docs=10, report=True, dt=dt)
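###Markdown
A minimal Python sketch of the same idea (added for illustration; `closest_docs` below is a hypothetical helper, not part of LDAModule, and it assumes `dt` is the doc-topic DataFrame built above):
###Code
from scipy.spatial.distance import cdist

def closest_docs(dt, doc_id, num_docs=10):
    # Euclidean distance between the selected document's topic vector and all others
    x = dt.loc[[doc_id]].values
    d = cdist(dt.values, x).ravel()
    return dt.index[d.argsort()[:num_docs]]  # the first hit is the document itself
###Output
_____no_output_____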
###Code
# Scratch
###Output
_____no_output_____
###Markdown
topic_docs_conditional = function (t, v, key, n) { var p0 = my.dt.p[t], p1 = my.dt.p[t + 1], p, docs = [ ], bisect, insert, i, result = []; // column slice // TODO speed bottleneck: all that row-summing gets slooow // because row-slicing is slow on the col-compressed matrix for (p = p0; p < p1; p += 1) { if (v === undefined // then: unconditional top docs || my.doc_categories[v][my.dt.i[p]] === key) { docs.push({ doc: my.dt.i[p], frac: my.dt.x[p] / my.dt.row_sum(my.dt.i[p]), weight: my.dt.x[p] }); } } // return them all, sorted, if there are fewer than n hits if (n >= docs.length) { docs.sort(function (a, b) { return utils.desc(a.frac, b.frac) || utils.desc(a.doc, b.doc); // stabilize sort }); return docs; } // initial guess. simplifies the conditionals below to do it this way, // and sorting n elements is no biggie result = docs.slice(0, n).sort(function (a, b) { return utils.asc(a.frac, b.frac) || utils.asc(a.doc, b.doc); // stabilize sort }); bisect = utils.bisector_left(function (d) { return d.frac; }); for (i = n; i < docs.length; i += 1) { insert = bisect(result, docs[i].frac); if (insert > 0) { result.splice(insert, 0, docs[i]); result.shift(); } else if (result[0].frac === docs[i].frac) { // insert = 0 but a tie result.unshift(docs[i]); } } // biggest first return utils.shorten(result.reverse(), n, function (xs, i) { return xs[i].frac; });};
###Code
ddt['p'][0:2]
ddt['i'][10000]
ddt['x'][10000]
dt.as_matrix().T
import requests
dfr_dt = 'http://microdatahub.com/topicsmodeling/dfr/topic_browser/model.php?type=dt&model=data50_SAR'
dfr_dt = requests.get(dfr_dt)
dfr_dt = ddt # dfr_dt.json()
# http://microdatahub.com/topicsmodeling/dfr/topic_browser/browser.php?model=data50_SAR&type=SAR&topic_count=50#/doc/9622
did = 8130
ps_9622 = [p for p in range(0, dfr_dt['p'][-1]) if dfr_dt['i'][p] == did]
for t in range(50):
p0 = dfr_dt['p'][t]
p1 = dfr_dt['p'][t + 1]
pt_9622 = [p for p in range(p0, p1) if dfr_dt['i'][p] == did]
try:
raw = dfr_dt['x'][pt_9622[0]]
w = raw / sum(dfr_dt['x'][p] for p in ps_9622)
print(t, raw, w)
except:
continue
dt.T.sum()
[p for p in range(0, dfr_dt['p'][-1]) if dfr_dt['i'][p] == 10490]
wbdocs.doclist[wbdocs.doclist.uid == 27164047].tokens
wbdocs.doclist[wbdocs.doclist.uid == 29935714].tokens
WBdocs_filtered.shape
dfr_dt['p'][-1]
dt.shape
###Output
_____no_output_____ |
170505_seurat/seurat_R.ipynb | ###Markdown
*First compiled: May 5, 2017.* Profiling Seurat's guided clustering tutorial for 3k PBMC cells
###Code
library(Seurat)
library(dplyr)
library(Matrix)
# Load the PBMC dataset
pbmc.data <- Read10X("data/pbmc3k_filtered_gene_bc_matrices/hg19")
previous_time <- proc.time()[3]
# Initialize the Seurat object with the raw (non-normalized data)
# Note that this is slightly different than the older Seurat workflow, where log-normalized values were passed in directly.
# You can continue to pass in log-normalized values, just set do.logNormalize=F in the next step.
pbmc <- new("seurat", raw.data = pbmc.data)
# Keep all genes expressed in >= 3 cells, keep all cells with >= 200 genes
# Perform log-normalization, first scaling each cell to a total of 1e4 molecules (as in Macosko et al. Cell 2015)
pbmc <- Setup(pbmc, min.cells = 3, min.genes = 200, do.logNormalize = T, total.expr = 1e4, project = "10X_PBMC")
proc.time()[3] - previous_time
# The number of genes and UMIs (nGene and nUMI) are automatically calculated for every object by Seurat.
# For non-UMI data, nUMI represents the sum of the non-normalized values within a cell
# We calculate the percentage of mitochondrial genes here and store it in percent.mito using the AddMetaData.
# The % of UMI mapping to MT-genes is a common scRNA-seq QC metric.
# NOTE: You must have the Matrix package loaded to calculate the percent.mito values.
mito.genes <- grep("^MT-", rownames(pbmc@data), value = T)
percent.mito <- colSums(expm1(pbmc@data[mito.genes, ]))/colSums(expm1(pbmc@data))
#AddMetaData adds columns to [email protected], and is a great place to stash QC stats
pbmc <- AddMetaData(pbmc, percent.mito, "percent.mito")
VlnPlot(pbmc, c("nGene", "nUMI", "percent.mito"), nCol = 3)
#GenePlot is typically used to visualize gene-gene relationships, but can be used for anything calculated by the object, i.e. columns in [email protected], PC scores etc.
#Since there is a rare subset of cells with an outlier level of high mitochondrial percentage, and also low UMI content, we filter these as well
par(mfrow = c(1, 2))
GenePlot(pbmc, "nUMI", "percent.mito")
GenePlot(pbmc, "nUMI", "nGene")
#We filter out cells that have unique gene counts over 2,500
#Note that accept.high and accept.low can be used to define a 'gate', and can filter cells not only based on nGene but on anything in the object (as in GenePlot above)
pbmc <- SubsetData(pbmc, subset.name = "nGene", accept.high = 2500)
pbmc <- SubsetData(pbmc, subset.name = "percent.mito", accept.high = 0.05)
previous_time <- proc.time()[3]
#note that this overwrites [email protected]. Therefore, if you intend to use RegressOut, you can set do.scale=F and do.center=F in the original object to save some time.
pbmc <- RegressOut(pbmc, latent.vars = c("nUMI", "percent.mito"))
proc.time()[3] - previous_time
pbmc <- MeanVarPlot(pbmc ,fxn.x = expMean, fxn.y = logVarDivMean, x.low.cutoff = 0.0125, x.high.cutoff = 3, y.cutoff = 0.5, do.contour = F)
previous_time <- proc.time()[3]
pbmc <- PCA(pbmc, pc.genes = [email protected], do.print = TRUE, pcs.print = 5, genes.print = 5)
proc.time()[3] - previous_time
PCAPlot(pbmc, 1, 2)
PCElbowPlot(pbmc)
previous_time <- proc.time()[3]
#save.SNN=T saves the SNN so that the SLM algorithm can be rerun using the same graph, but with a different resolution value (see docs for full details)
pbmc <- FindClusters(pbmc, pc.use = 1:10, resolution = 0.6, print.output = 0, save.SNN = T)
proc.time()[3] - previous_time
previous_time <- proc.time()[3]
pbmc <- RunTSNE(pbmc, dims.use = 1:10, do.fast = T)
proc.time()[3] - previous_time
previous_time <- proc.time()[3]
# find all markers of cluster 1
cluster1.markers <- FindMarkers(pbmc, ident.1 = 1, min.pct = 0.25)
print(head(cluster1.markers, 5))
proc.time()[3] - previous_time
previous_time <- proc.time()[3]
# find all markers distinguishing cluster 5 from clusters 0 and 3
cluster5.markers <- FindMarkers(pbmc, 5, c(0,3), min.pct = 0.25)
print(head(cluster5.markers, 5))
proc.time()[3] - previous_time
previous_time <- proc.time()[3]
# find markers for every cluster compared to all remaining cells, report only the positive ones
pbmc.markers <- FindAllMarkers(pbmc, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)
pbmc.markers %>% group_by(cluster) %>% top_n(2, avg_diff)
proc.time()[3] - previous_time
###Output
_____no_output_____ |
dev/21_tutorial_imagenette.ipynb | ###Markdown
Tutorial: Training a model on Imagenette> Regular training with square images and rectangular training Square training Loading the data with `DataSource` To load the data with the medium-level API `DataSource`, we need to gather all the images and define some way to split them between training and validation.
###Code
source = untar_data(URLs.IMAGENETTE_160)
items = get_image_files(source)
split_idx = GrandparentSplitter(valid_name='val')(items)
###Output
_____no_output_____
###Markdown
Then we detail the type transforms (applied to the items to form a tuple) and the dataset transforms. For our inputs we use `PILImage.create` and for our targets, the `parent_label` function to convert a filename to its class, followed by `Categorize`.The dataset transforms contain data augmentation using PIL and a resize to 128.
###Code
tfms = [PILImage.create, [parent_label, Categorize()]]
ds_img_tfms = [ToTensor(), FlipItem(0.5), RandomResizedCrop(128, min_scale=0.35)]
###Output
_____no_output_____
###Markdown
We can then pass all of this information to `DataSource`.
###Code
dsrc = DataSource(items, tfms, filts=split_idx)
###Output
_____no_output_____
###Markdown
To convert our `DataSource` to a `DataBunch`, we need to indicate the transforms we want to use at the batch level: here putting the tensors on the GPU with `Cuda`, converting the byte tensors to floats, then normalizing using the traditional ImageNet statistics.
###Code
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
dl_tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
dbch = dsrc.databunch(after_item=ds_img_tfms, after_batch=dl_tfms, bs=64, num_workers=0)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Loading the data with `DataBlock` An easier way is to use the `DataBlock` higher-level API. We just need to specify the types, how to get the items, how to split them, and how to label them to build an Imagenette datablock.
###Code
imagenette = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=GrandparentSplitter(valid_name='val'),
get_y=parent_label)
###Output
_____no_output_____
###Markdown
We can then directly call the `databunch` method when specifying a source (where the items are) and the non-default dataset and dataloader transforms. To check which transforms are included by default (inferred from the types passed), we can check (and potentially modify) the attributes `default_type_tfms`, `default_ds_tfms` and `default_dl_tfms` of the `imagenette` object.
###Code
imagenette.default_type_tfms,imagenette.default_ds_tfms,imagenette.default_dl_tfms
###Output
_____no_output_____
###Markdown
Here we need to add the data augmentation and resize, as well as the normalization.
###Code
dbch = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The following function will give us a `Learner` to train a model on imagenette.
###Code
def cnn_learner(arch, dbunch, loss_func, opt_func, c_in=None, c_out=None,
lr=1e-2, progress=True, mixup=0, xtra_cb=None, **kwargs):
cbfs = [MixedPrecision]
arch_args = {}
if not c_in : c_in = data.c_in
if not c_out: c_out = data.c_out
if c_in: arch_args['c_in' ]=c_in
if c_out: arch_args['c_out']=c_out
return Learner(dbunch, arch(**arch_args), loss_func, opt_func=opt_func, lr=lr, cb_funcs=cbfs, **kwargs)
opt_func = partial(Adam, wd=0.01, eps=1e-3)
###Output
_____no_output_____
###Markdown
To use label smoothing, we define a custom loss function.
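Concretely (a sketch of what the code below computes): with smoothing factor eps and C classes, the loss works out to (1 - eps) * CE(output, target) + eps * (average over classes of -log p_j), i.e. a mix of the usual cross-entropy and a uniform-over-classes cross-entropy.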
###Code
class LabelSmoothingCrossEntropy(Module):
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction)
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
###Output
_____no_output_____
###Markdown
Then we can train our model.
###Code
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Rect training (not working well) For rectangular training, we change the dataset transforms to use only the flip. We will resize the images only when it's time to batch them.
###Code
#img_tfms = [FlipItem(0.5)]
#tfms = [PILImage.create, [parent_label, Categorize()]]
#dsrc = DataSource(items, tfms, filts=split_idx, ds_tfms=img_tfms)
#tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
#bs = 64
###Output
_____no_output_____
###Markdown
We use a sampler that will group the images into batches of close size and aspect ratio (with a bit of shuffling for the training set) and a collation function that will resize them to the median aspect ratio and median number of pixels (bounded by `max_px`). `rand_min_scale` is used to do a `RandomResizedCrop` to that size on the training set.
###Code
#samp = SortARSampler(dsrc.train, shuffle=True, bs=bs)
#collate_fn = ResizeCollate(max_px=128*128, rand_min_scale=0.35, rand_ratio_pct=0.33, round_mult=32)
#train_dl = TfmdDL(dsrc.train, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
#samp = SortARSampler(dsrc.valid, shuffle=False, bs=bs)
#collate_fn = ResizeCollate(max_px=128*128, round_mult=32)
#valid_dl = TfmdDL(dsrc.valid, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
###Output
_____no_output_____
###Markdown
Then we create a `DataBunch` with those two dataloaders.
###Code
#dbch1 = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
#dbch = DataBunch(train_dl, valid_dl)
#dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The training then works exactly as before.
###Code
#learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
#learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Tutorial: Training a model on Imagenette> Regular training with square images and rectangular training Square training Loading the data with `DataSource` To load the data with the medium-level API `DataSource`, we need to gather all the images and define some way to split them between training and validation.
###Code
source = untar_data(URLs.IMAGENETTE_160)
items = get_image_files(source)
split_idx = GrandparentSplitter(valid_name='val')(items)
###Output
_____no_output_____
###Markdown
Then we detail the type transforms (applied to the items to form a tuple) and the dataset transforms. For our inputs we use `PILImage.create` and for our targets, the `parent_label` function to convert a filename to its class, followed by `Categorize`. The dataset transforms contain data augmentation using PIL and a resize to 128.
###Code
tfms = [PILImage.create, [parent_label, Categorize()]]
ds_img_tfms = [ToTensor(), PILFlip(0.5), RandomResizedCrop(128, min_scale=0.35)]
###Output
_____no_output_____
###Markdown
We can then pass all of this information to `DataSource`.
###Code
dsrc = DataSource(items, tfms, filts=split_idx)
###Output
_____no_output_____
###Markdown
To convert our `DataSource` to a `DataBunch`, we need to indicate the transforms we want to use at the batch level: here, putting the tensors on the GPU with `Cuda`, converting them from bytes to floats, then normalizing with the traditional ImageNet statistics.
###Code
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
dl_tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
dbch = dsrc.databunch(after_item=ds_img_tfms, after_batch=dl_tfms, bs=64, num_workers=0)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Loading the data with `DataBlock` An easier way is to use the `DataBlock` higher-level API. We just need to specify the types, how to get the items, how to split them and how to label to build an Imagenette datablock.
###Code
imagenette = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=GrandparentSplitter(valid_name='val'),
get_y=parent_label)
###Output
_____no_output_____
###Markdown
We can then directly call the `databunch` method when specifying a source (where the items are) and the non-default dataset and dataloader transforms. To check which transforms are included by default (inferred from the types passed), we can check (and potentially modify) the attributes `default_type_tfms`, `default_ds_tfms` and `default_dl_tfms` of the `imagenette` object.
###Code
imagenette.default_type_tfms,imagenette.default_ds_tfms,imagenette.default_dl_tfms
###Output
_____no_output_____
###Markdown
Here we need to add the data augmentation and resize, as well as the normalization.
###Code
dbch = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The following function will give us a `Learner` to train a model on imagenette.
###Code
def cnn_learner(arch, data, loss_func, opt_func, c_in=None, c_out=None,
lr=1e-2, progress=True, mixup=0, xtra_cb=None, **kwargs):
cbfs = [MixedPrecision]
arch_args = {}
if not c_in : c_in = data.c_in
if not c_out: c_out = data.c_out
if c_in: arch_args['c_in' ]=c_in
if c_out: arch_args['c_out']=c_out
return Learner(arch(**arch_args), data, loss_func, opt_func=opt_func, lr=lr, cb_funcs=cbfs, **kwargs)
opt_func = partial(Adam, wd=0.01, eps=1e-3)
###Output
_____no_output_____
###Markdown
To use label smoothing, we define a custom loss function.
###Code
class LabelSmoothingCrossEntropy(Module):
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction)
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
###Output
_____no_output_____
###Markdown
Then we can train our model.
###Code
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Rect training (not working well) For rectangular training, we change the dataset transforms to use only the flip; we will only resize the images when it's time to batch them.
###Code
img_tfms = [PILFlip(0.5)]
tfms = [PILImage.create, [parent_label, Categorize()]]
dsrc = DataSource(items, tfms, filts=split_idx, ds_tfms=img_tfms)
tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
bs = 64
###Output
_____no_output_____
###Markdown
We use a sampler that groups the images into batches of close size and aspect ratio (with a bit of shuffling for the training set) and a collation function that resizes them to the median aspect ratio and median number of pixels (bounded by `max_px`). `rand_min_scale` is used to do a `RandomResizedCrop` to that size on the training set.
###Code
samp = SortARSampler(dsrc.train, shuffle=True, bs=bs)
collate_fn = ResizeCollate(max_px=128*128, rand_min_scale=0.35, rand_ratio_pct=0.33, round_mult=32)
train_dl = TfmdDL(dsrc.train, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
samp = SortARSampler(dsrc.valid, shuffle=False, bs=bs)
collate_fn = ResizeCollate(max_px=128*128, round_mult=32)
valid_dl = TfmdDL(dsrc.valid, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
###Output
_____no_output_____
###Markdown
Then we create a `DataBunch` with those two dataloaders.
###Code
#dbch1 = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch = DataBunch(train_dl, valid_dl)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The training then works exactly as before.
###Code
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Tutorial: Training a model on Imagenette> Regular training with square images and rectangular training Square training Loading the data with `DataSource` To load the data with the medium-level API `DataSource`, we need to gather all the images and define some way to split them between training and validation.
###Code
source = untar_data(URLs.IMAGENETTE_160)
items = get_image_files(source)
split_idx = GrandparentSplitter(valid_name='val')(items)
###Output
_____no_output_____
###Markdown
Then we detail the type transforms (applied to the items to form a tuple) and the dataset transforms. For our inputs we use `PILImage.create` and for our targets, the `parent_label` function to convert a filename to its class, followed by `Categorize`. The dataset transforms contain data augmentation using PIL and a resize to 128.
###Code
tfms = [PILImage.create, [parent_label, Categorize()]]
ds_img_tfms = [ToTensor(), PILFlip(0.5), RandomResizedCrop(128, min_scale=0.35)]
###Output
_____no_output_____
###Markdown
We can then pass all of this information to `DataSource`.
###Code
dsrc = DataSource(items, tfms, filts=split_idx)
###Output
_____no_output_____
###Markdown
To convert our `DataSource` to a `DataBunch`, we need to indicate the transforms we want to use at the batch level: here, putting the tensors on the GPU with `Cuda`, converting them from bytes to floats, then normalizing with the traditional ImageNet statistics.
###Code
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
dl_tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
dbch = dsrc.databunch(after_item=ds_img_tfms, after_batch=dl_tfms, bs=64, num_workers=0)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Loading the data with `DataBlock` An easier way is to use the `DataBlock` higher-level API. We just need to specify the types, how to get the items, how to split them and how to label to build an Imagenette datablock.
###Code
imagenette = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=GrandparentSplitter(valid_name='val'),
get_y=parent_label)
###Output
_____no_output_____
###Markdown
We can then directly call the `databunch` method when specifying a source (where the items are) and the non-default dataset and dataloader transforms. To check which transforms are included by default (inferred from the types passed), we can check (and potentially modify) the attributes `default_type_tfms`, `default_ds_tfms` and `default_dl_tfms` of the `imagenette` object.
###Code
imagenette.default_type_tfms,imagenette.default_ds_tfms,imagenette.default_dl_tfms
###Output
_____no_output_____
###Markdown
Here we need to add the data augmentation and resize, as well as the normalization.
###Code
dbch = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The following function will give us a `Learner` to train a model on imagenette.
###Code
def cnn_learner(arch, dbunch, loss_func, opt_func, c_in=None, c_out=None,
lr=1e-2, progress=True, mixup=0, xtra_cb=None, **kwargs):
cbfs = [MixedPrecision]
arch_args = {}
    if not c_in : c_in = dbunch.c_in    # infer channel counts from the DataBunch when not given
    if not c_out: c_out = dbunch.c_out
if c_in: arch_args['c_in' ]=c_in
if c_out: arch_args['c_out']=c_out
return Learner(dbunch, arch(**arch_args), loss_func, opt_func=opt_func, lr=lr, cb_funcs=cbfs, **kwargs)
opt_func = partial(Adam, wd=0.01, eps=1e-3)
###Output
_____no_output_____
###Markdown
To use label smoothing, we define a custom loss function.
###Code
class LabelSmoothingCrossEntropy(Module):
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction)
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
###Output
_____no_output_____
###Markdown
Then we can train our model.
###Code
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Rect training (not working well) For rectangular training, we change the dataset transforms to use only the flip; we will only resize the images when it's time to batch them.
###Code
#img_tfms = [PILFlip(0.5)]
#tfms = [PILImage.create, [parent_label, Categorize()]]
#dsrc = DataSource(items, tfms, filts=split_idx, ds_tfms=img_tfms)
#tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
#bs = 64
###Output
_____no_output_____
###Markdown
We use a sampler that groups the images into batches of close size and aspect ratio (with a bit of shuffling for the training set) and a collation function that resizes them to the median aspect ratio and median number of pixels (bounded by `max_px`). `rand_min_scale` is used to do a `RandomResizedCrop` to that size on the training set.
###Code
#samp = SortARSampler(dsrc.train, shuffle=True, bs=bs)
#collate_fn = ResizeCollate(max_px=128*128, rand_min_scale=0.35, rand_ratio_pct=0.33, round_mult=32)
#train_dl = TfmdDL(dsrc.train, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
#samp = SortARSampler(dsrc.valid, shuffle=False, bs=bs)
#collate_fn = ResizeCollate(max_px=128*128, round_mult=32)
#valid_dl = TfmdDL(dsrc.valid, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
###Output
_____no_output_____
###Markdown
Then we create a `DataBunch` with those two dataloaders.
###Code
#dbch1 = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
#dbch = DataBunch(train_dl, valid_dl)
#dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The training then works exactly as before.
###Code
#learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
#learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Tutorial: Training a model on Imagenette> Regular training with square images and rectangular training Square training Loading the data with `DataSource` To load the data with the medium-level API `DataSource`, we need to gather all the images and define some way to split them between training and validation.
###Code
source = untar_data(URLs.IMAGENETTE_160)
items = get_image_files(source)
split_idx = GrandparentSplitter(valid_name='val')(items)
###Output
_____no_output_____
###Markdown
Then we detail the type transforms (applied to the items to form a tuple) and the dataset transforms. For our inputs we use `PILImage.create` and for our targets, the `parent_label` function to convert a filename to its class, followed by `Categorize`. The dataset transforms contain data augmentation using PIL and a resize to 128.
###Code
tfms = [PILImage.create, [parent_label, Categorize()]]
ds_img_tfms = [ToTensor(), PILFlip(0.5), RandomResizedCrop(128, min_scale=0.35)]
###Output
_____no_output_____
###Markdown
We can then pass all of this information to `DataSource`.
###Code
dsrc = DataSource(items, tfms, filts=split_idx)
###Output
_____no_output_____
###Markdown
To convert our `DataSource` to a `DataBunch`, we need to indicate the transforms we want to use at the batch level: here, putting the tensors on the GPU with `Cuda`, converting them from bytes to floats, then normalizing with the traditional ImageNet statistics.
###Code
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
dl_tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
dbch = dsrc.databunch(after_item=ds_img_tfms, after_batch=dl_tfms, bs=64, num_workers=0)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Loading the data with `DataBlock` An easier way is to use the `DataBlock` higher-level API. We just need to specify the types, how to get the items, how to split them and how to label to build an Imagenette datablock.
###Code
imagenette = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=GrandparentSplitter(valid_name='val'),
get_y=parent_label)
###Output
_____no_output_____
###Markdown
We can then directly call the `databunch` method when specifying a source (where the items are) and the non-default dataset and dataloader transforms. To check which transforms are included by default (inferred from the types passed), we can check (and potentially modify) the attributes `default_type_tfms`, `default_ds_tfms` and `default_dl_tfms` of the `imagenette` object.
###Code
imagenette.default_type_tfms,imagenette.default_ds_tfms,imagenette.default_dl_tfms
###Output
_____no_output_____
###Markdown
Here we need to add the data augmentation and resize, as well as the normalization.
###Code
dbch = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The following function will give us a `Learner` to train a model on imagenette.
###Code
def cnn_learner(arch, dbunch, loss_func, opt_func, c_in=None, c_out=None,
lr=1e-2, progress=True, mixup=0, xtra_cb=None, **kwargs):
cbfs = [MixedPrecision]
arch_args = {}
    if not c_in : c_in = dbunch.c_in    # infer channel counts from the DataBunch when not given
    if not c_out: c_out = dbunch.c_out
if c_in: arch_args['c_in' ]=c_in
if c_out: arch_args['c_out']=c_out
return Learner(arch(**arch_args), dbunch, loss_func, opt_func=opt_func, lr=lr, cb_funcs=cbfs, **kwargs)
opt_func = partial(Adam, wd=0.01, eps=1e-3)
###Output
_____no_output_____
###Markdown
To use label smoothing, we define a custom loss function.
###Code
class LabelSmoothingCrossEntropy(Module):
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction)
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
###Output
_____no_output_____
###Markdown
Then we can train our model.
###Code
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Rect training (not working well) For rectangular training, we change the dataset transforms to use only the flip; we will only resize the images when it's time to batch them.
###Code
img_tfms = [PILFlip(0.5)]
tfms = [PILImage.create, [parent_label, Categorize()]]
dsrc = DataSource(items, tfms, filts=split_idx, ds_tfms=img_tfms)
tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
bs = 64
###Output
_____no_output_____
###Markdown
We use a sampler that groups the images into batches of close size and aspect ratio (with a bit of shuffling for the training set) and a collation function that resizes them to the median aspect ratio and median number of pixels (bounded by `max_px`). `rand_min_scale` is used to do a `RandomResizedCrop` to that size on the training set.
###Code
samp = SortARSampler(dsrc.train, shuffle=True, bs=bs)
collate_fn = ResizeCollate(max_px=128*128, rand_min_scale=0.35, rand_ratio_pct=0.33, round_mult=32)
train_dl = TfmdDL(dsrc.train, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
samp = SortARSampler(dsrc.valid, shuffle=False, bs=bs)
collate_fn = ResizeCollate(max_px=128*128, round_mult=32)
valid_dl = TfmdDL(dsrc.valid, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
###Output
_____no_output_____
###Markdown
Then we create a `DataBunch` with those two dataloaders.
###Code
#dbch1 = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch = DataBunch(train_dl, valid_dl)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The training then works exactly as before.
###Code
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Tutorial: Training a model on Imagenette> Regular training with square images and rectangular training Square training Loading the data with `DataSource` To load the data with the medium-level API `DataSource`, we need to gather all the images and define some way to split them between training and validation.
###Code
source = untar_data(URLs.IMAGENETTE_160)
items = get_image_files(source)
split_idx = GrandparentSplitter(valid_name='val')(items)
###Output
_____no_output_____
###Markdown
Then we detail the type transforms (applied to the items to form a tuple) and the dataset transforms. For our inputs we use `PILImage.create` and for our targets, the `parent_label` function to convert a filename to its class, followed by `Categorize`. The dataset transforms contain data augmentation using PIL and a resize to 128.
###Code
tfms = [PILImage.create, [parent_label, Categorize()]]
ds_img_tfms = [ToTensor(), PILFlip(0.5), RandomResizedCrop(128, min_scale=0.35)]
###Output
_____no_output_____
###Markdown
We can then pass all of this information to `DataSource`.
###Code
dsrc = DataSource(items, tfms, filts=split_idx)
###Output
_____no_output_____
###Markdown
To convert our `DataSource` to a `DataBunch`, we need to indicate the transforms we want to use at the batch level: here, putting the tensors on the GPU with `Cuda`, converting them from bytes to floats, then normalizing with the traditional ImageNet statistics.
###Code
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
dl_tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
dbch = dsrc.databunch(after_item=ds_img_tfms, after_batch=dl_tfms, bs=64, num_workers=0)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Loading the data with `DataBlock` An easier way is to use the `DataBlock` higher-level API. We just need to specify the types, how to get the items, how to split them and how to label to build an Imagenette datablock.
###Code
imagenette = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=GrandparentSplitter(valid_name='val'),
get_y=parent_label)
###Output
_____no_output_____
###Markdown
We can then directly call the `databunch` method when specifying a source (where the items are) and the non-default dataset and dataloader transforms. To check which transforms are included by default (inferred from the types passed), we can check (and potentially modify) the attributes `default_type_tfms`, `default_ds_tfms` and `default_dl_tfms` of the `imagenette` object.
###Code
imagenette.default_type_tfms,imagenette.default_ds_tfms,imagenette.default_dl_tfms
###Output
_____no_output_____
###Markdown
Here we need to add the data augmentation and resize, as well as the normalization.
###Code
dbch = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The following function will give us a `Learner` to train a model on imagenette.
###Code
def cnn_learner(arch, dbunch, loss_func, opt_func, c_in=None, c_out=None,
lr=1e-2, progress=True, mixup=0, xtra_cb=None, **kwargs):
cbfs = [MixedPrecision]
arch_args = {}
    if not c_in : c_in = dbunch.c_in    # infer channel counts from the DataBunch when not given
    if not c_out: c_out = dbunch.c_out
if c_in: arch_args['c_in' ]=c_in
if c_out: arch_args['c_out']=c_out
return Learner(arch(**arch_args), dbunch, loss_func, opt_func=opt_func, lr=lr, cb_funcs=cbfs, **kwargs)
opt_func = partial(Adam, wd=0.01, eps=1e-3)
###Output
_____no_output_____
###Markdown
To use label smoothing, we define a custom loss function.
###Code
class LabelSmoothingCrossEntropy(Module):
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction)
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
###Output
_____no_output_____
###Markdown
Then we can train our model.
###Code
learn.fit_one_cycle(1)
###Output
_____no_output_____
###Markdown
Rect training (not working well) For rectangular training, we change the dataset transforms to use only the flip; we will only resize the images when it's time to batch them.
###Code
img_tfms = [PILFlip(0.5)]
tfms = [PILImage.create, [parent_label, Categorize()]]
dsrc = DataSource(items, tfms, filts=split_idx, ds_tfms=img_tfms)
tfms = [Cuda(), ByteToFloatTensor(), Normalize(*imagenet_stats)]
bs = 64
###Output
_____no_output_____
###Markdown
We use a sampler that groups the images into batches of close size and aspect ratio (with a bit of shuffling for the training set) and a collation function that resizes them to the median aspect ratio and median number of pixels (bounded by `max_px`). `rand_min_scale` is used to do a `RandomResizedCrop` to that size on the training set.
###Code
samp = SortARSampler(dsrc.train, shuffle=True, bs=bs)
collate_fn = ResizeCollate(max_px=128*128, rand_min_scale=0.35, rand_ratio_pct=0.33, round_mult=32)
train_dl = TfmdDL(dsrc.train, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
samp = SortARSampler(dsrc.valid, shuffle=False, bs=bs)
collate_fn = ResizeCollate(max_px=128*128, round_mult=32)
valid_dl = TfmdDL(dsrc.valid, tfms, num_workers=8, batch_sampler=samp, collate_fn=collate_fn)
###Output
_____no_output_____
###Markdown
Then we create a `DataBunch` with those two dataloaders.
###Code
#dbch1 = imagenette.databunch(source, bs=64, num_workers=8, ds_tfms=ds_img_tfms, dl_tfms=Normalize(*imagenet_stats))
dbch = DataBunch(train_dl, valid_dl)
dbch.show_batch(max_n=9)
###Output
_____no_output_____
###Markdown
Training The training then works exactly as before.
###Code
learn = cnn_learner(xresnet18, dbch, LabelSmoothingCrossEntropy(), opt_func=opt_func, c_in=3, c_out=10, lr=1e-2, metrics=accuracy)
learn.fit_one_cycle(1)
###Output
_____no_output_____ |
china_population_py.ipynb | ###Markdown
###Code
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('china_population.csv')
data.head()
ab_number = data.loc[data["Type"] == "Number"]
df = pd.pivot_table(ab_number, index = ['Age group'], columns = ['Gender'],values = '1960').reset_index()
df
#single image
#define x and y limits
y = range(0, len(df))
x_male = df['Male']
x_female = df['Female']
#define plot parameters
fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(9, 6))
#specify background color and plot title
fig.patch.set_facecolor('xkcd:light grey')
plt.figtext(.5,.9,'China Population ' + "1960", fontsize=15, ha='center')
#define male and female bars
axes[0].barh(y, x_male, align='center', color='royalblue')
axes[0].set(title='Males')
axes[1].barh(y, x_female, align='center', color='lightpink')
axes[1].set(title='Females')
#adjust grid parameters and specify labels for y-axis
axes[1].grid()
axes[0].set(yticks=y, yticklabels=df['Age group'])
axes[0].invert_xaxis()
axes[0].grid()
plt.savefig('china_population_plot/1960.png')
#display plot
plt.show()
for i in range(1960, 2020):
df = pd.pivot_table(ab_number, index = ['Age group'], columns = ['Gender'],values = str(i) ).reset_index()
#define x and y limits
y = range(0, len(df))
x_male = df['Male']
x_female = df['Female']
#define plot parameters
fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(9, 6))
#specify background color and plot title
fig.patch.set_facecolor('xkcd:light grey')
plt.figtext(.5,.9,'China Population ' + str(i), fontsize=15, ha='center')
#define male and female bars
axes[0].barh(y, x_male, align='center', color='royalblue')
axes[0].set(title='Males')
axes[1].barh(y, x_female, align='center', color='lightpink')
axes[1].set(title='Females')
#adjust grid parameters and specify labels for y-axis
axes[1].grid()
axes[0].set(yticks=y, yticklabels=df['Age group'])
axes[0].invert_xaxis()
axes[0].grid()
#output
output_path = 'china_population_plot_numbers'
filepath = os.path.join(output_path, str(i) +'_number.jpg')
plt.savefig(filepath, dpi=200)
#display plot
plt.show()
# #remove all files
# import os
# import glob
# files = glob.glob('china_population_plot_numbers/*')
# for f in files:
# os.remove(f)
import os
import imageio
png_dir = 'china_population_plot_numbers'
images = []
for file_name in sorted(os.listdir(png_dir)):
if file_name.endswith('.jpg'):
file_path = os.path.join(png_dir, file_name)
images.append(imageio.imread(file_path))
imageio.mimsave('animation/china_population_numbers.gif', images)
###Output
_____no_output_____ |
.ipynb_checkpoints/infectious-disease-2001-2014-checkpoint.ipynb | ###Markdown
data.world Infectious Disease 2001-2014 > Setup > Before running data.world notebooks for the first time, you'll need to: 1. Install data.world's Python package, including optional `pandas` dependencies: ```shell pip install git+git://github.com/datadotworld/data.world-py.git#egg=project[pandas]``` 2. Obtain an API access token at https://data.world/settings/advanced 3. Store the API access token using the `dw` command-line tool: ```shell dw configure``` > Once your environment is set up, these steps do not need to be repeated for other data.world notebooks.
###Code
import matplotlib.pyplot as plt
%matplotlib notebook
# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')
import seaborn as sns
import pandas as pd
import datadotworld as dw
# Datasets are referenced by their path
dataset_key = 'health/infectious-disease-2001-2014'
# Or simply by their URL
dataset_key = 'https://data.world/health/infectious-disease-2001-2014'
# Load dataset (onto the local file system)
dataset_local = dw.load_dataset(dataset_key) # cached under ~/.dw/cache
# See what is in it
dataset_local.describe()
###Output
_____no_output_____
###Markdown
Next steps- Run `help()` to learn more ways to access and use your data. Try: - `help(dw.load_dataset)` - `help(dw.query)`- Learn more at: https://github.com/datadotworld/data.world-py and https://docs.data.world
###Code
dataset_local.dataframes
for i in dataset_local.dataframes:
dataset_local.tables[i].to_csv(i+'.csv')
counties_table = dataset_local.tables['counties']
counties_table
diseases_branches_table = dataset_local.tables['diseases_branches']
diseases_branches_table
SQLs = {1: 'select year, disease, count from rows where rows.disease in'
'("HIV", "Gonorrhea", "Early Syphilis") and year<2014',
2: 'select year, disease, rate from rows where rows.disease in'
'("HIV", "Gonorrhea", "Early Syphilis") and year<2014'}
dw.query(dataset_key, SQLs[1]).dataframe.groupby(['year','disease']).sum().unstack(fill_value=0).plot()
dw.query(dataset_key, SQLs[1]).dataframe.groupby(['year','disease']).sum().unstack(fill_value=0).plot()
###Output
_____no_output_____
###Markdown
So, STIs were increasing while HIV dropped year by year. Let's check whether this hypothesis also holds for rates.
###Code
dw.query(dataset_key, SQLs[2]).dataframe.groupby(['year','disease']).sum().unstack(fill_value=0).plot.box();
dw.query(dataset_key, SQLs[2]).dataframe.groupby(['year','disease']).sum().unstack(fill_value=0)
results.dataframe.groupby(['disease']).count()
pd.tools.plotting.scatter_matrix(iris);
df1 = pd.read_csv('https://query.data.world/s/32k7h876aprvdxf33i9yponf2')
df1.to_csv('diseases_26.csv')
df1.head()
df1[(df1.County == 'Sacramento') & (df1.Disease == 'HIV') & (df1.Year == 2013)].head()
###Output
_____no_output_____
###Markdown
Calculating normalized relative changes of rates
###Code
df3 = df1[['Disease', 'County', 'Year', 'Rate']]
df1.County.unique()
len(df1.County.unique())
df1.groupby?
df1[['Year', 'County', 'Disease', 'Count']].groupby(['County', 'Disease', 'Year']).sum().plot()
df1[['Year', 'County', 'Disease', 'Count']].groupby(['County', 'Disease', 'Year']).sum().unstack(fill_value=0).T
df1[['Year', 'County', 'Disease', 'Count']].groupby(['County', 'Disease', 'Year']).sum().unstack(fill_value=0).T.plot()
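# Sketch of one way to read "normalized relative changes of rates" (an assumption,
# not the original author's definition): year-over-year percentage change of the
# rate per County/Disease, using the Sex == 'Total' rows and the column names
# already used elsewhere in this notebook.
rates = (df1[df1.Sex == 'Total']
         .groupby(['County', 'Disease', 'Year'])['Rate'].mean()
         .unstack('Year'))
rate_rel_change = rates.pct_change(axis=1)  # (rate_t - rate_{t-1}) / rate_{t-1}
rate_rel_change.head()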
# Very slow means calculating:
df_means = df1[['Year', 'County', 'Disease', 'Count']]
df1[(df1.Disease=='HIV') & (df1.County =='Butte')]
aa = df1[df1.Sex == 'Total'].groupby(['Year', 'Disease']).sum()['Count']
aa
aaa = pd.DataFrame(aa[2014].sort_values(ascending = False))
aaa.columns = [2014]
aa[2005]
aaa = pd.DataFrame()
kwargs = {"2007" : lambda x: aa[2005]}
aaa = aaa.assign(**kwargs)
aaa
df1 = df1.assign(e=pd.Series(np.random.randn(len(df1))).values)
aaa
pd.DataFrame(aa[2014].sort_values(ascending = False))
[[i, aa[i].sort_values(ascending = False)[:15]] for i in [2010,2011]]
pd.DataFrame([[i, aa[i].sort_values(ascending = False)[:15]] for i in [2010,2011]])
range(len(tmp[yr].sort_values(ascending = False)[:15]))
pd.DataFrame(aa[2004]).append(aaa)
pd.DataFrame([aa[2004],aaa])
aa[2004][:10]
pd.DataFrame(aa[2004]).reset_index()
df1[df1.Sex == 'Total'].groupby(['Year', 'Disease']).sum()['Count']
df_populations = df1
#1. List of TOP Diseases
import numpy as np
['T'+str(i) for i in np.arange(1,11,1)]
df1.Year.unique()[:-1]
df_populations = pd.DataFrame(columns = ['Year']+['T'+str(i) for i in np.arange(1,11,1)]).set_index('Year')
def get_population(year, county, sex = 'Total'):
"""
Returns population in given county at given year with given sex.
"""
return df1[(df1.Year == year) & (df1.Sex == sex)\
& (df1.Disease == 'HIV')\
& (df1.County == county)]['Population'].iloc[0]
def get_rate(year, county, sex = 'Total'):
    pass  # TODO: mirror get_population above, but return the 'Rate' column
get_population(2005, 'Sierra', sex = 'Total')
df1[(df1.Year == 2002) & (df1.Disease == 'HIV') & (df1.County == 'Sierra')]['Population']
df_populations
df4 = df1[['Year', 'County', 'Disease', 'Count']]
df1[df1.Sex == 'Total' & df1.County ==]['Population']
###Output
_____no_output_____ |
economic-data/Economic-Data-Text-Exploration.ipynb | ###Markdown
This notebook, which serves as the preliminary stage of model development, supports text/data cleaning and exploration procedures. This includes stemming, stop-word analysis, tokenization, special characters, collocation analysis and so forth.
###Code
import pandas as pd
import numpy as np
import scipy as sp
import nltk
import re
import gc
import pickle
import os
from sklearn import decomposition
from sklearn.model_selection import train_test_split
import seaborn
import matplotlib.pyplot as plt
import json
econdata = pd.read_csv("./data/economic-newstext-data.csv", encoding="latin-1", engine='python', header=0)
econdata.shape
###Output
_____no_output_____
###Markdown
Text/Data Cleaning
###Code
#Identify columns with a high share of nulls (beyond the 50% threshold) to drop before further manipulation
nulls = econdata.apply(lambda s: sum(s.isnull())/len(econdata))
print(nulls[nulls>0.5])
econdata.drop(nulls[nulls>0.5].index.values, axis=1, inplace=True)
econdata.drop(econdata.loc[econdata.relevance=="not sure"].index, inplace=True)
econdata['relev'] = econdata['relevance'].apply(lambda s: 1 if s=="yes" else 0)
econdata.drop(['articleid', 'date', 'lineid', 'previous_sentence', 'next_sentence', '_unit_id', '_golden',
'_unit_state'], axis=1, inplace=True)
econdata.drop(econdata[econdata.text.isnull()].index, inplace=True)
econdata.headline.values[0], econdata.text.values[0]
#Text Cleaning: removal of special characters, lowercasing, stemming
def ascii_correction(s):
s = s.encode('ascii', 'ignore').decode("utf-8").replace('\t', " ").replace("\\'", " ")
s = re.sub("[^a-zA-Z0-9.,%']", " ", s).replace(" ", " ").replace(",","")
return s.lower().replace("u.s.", "usa").replace("u.s", "usa") #To not interfere with period_correction
#Period correction: cleaner separation of words, unless period used as decimal point
def period_correction(s):
potential = [pos for pos, char in enumerate(s) if char=="." and pos!=(len(s)-1)]
num = ['0','1','2','3','4','5','6','7','8','9','10']
error = [pos for pos in potential if s[pos-1] not in num or s[pos+1] not in num]
if len(error)==0: return s
def recursive(arr):
if len(arr)==1: return " . "+s[arr[0]+1:]
else: return " . "+s[arr[0]+1:arr[1]] + recursive(arr[1:])
finalstr = s[:error[0]] + recursive(error)
return finalstr.replace(" "," ")
for col in ['headline', 'text']: econdata[col]=econdata[col].apply(lambda s: period_correction(ascii_correction(s)))
econdata.headline.values[0], econdata.text.values[0]
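# The notebook intro mentions stemming; the cleaning above stops at casing and
# special characters. A hedged sketch of an optional stemming pass with NLTK's
# PorterStemmer (written to a new column so it does not alter the pipeline above).
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
econdata['text_stemmed'] = econdata['text'].apply(lambda s: " ".join(stemmer.stem(w) for w in s.split()))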
###Output
_____no_output_____
###Markdown
Text Exploration
###Code
#Word Frequency Overall and across Relevance
head = econdata.headline.apply(lambda s: s.split()).values
#Monoid flattening techniques, not suitable for production but theoretically elegant
headrel = sum(head[econdata.relev==1], [])
headnotrel = sum(head[econdata.relev==0], [])
head = [word for case in head for word in case]
text = econdata.text.apply(lambda s: s.split()).values
textrel = sum(text[econdata.relev==1], [])
textnotrel = sum(text[econdata.relev==0], [])
text = [word for case in text for word in case]
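# Aside (not from the original notebook): itertools.chain.from_iterable gives a
# linear-time alternative to the quadratic sum(list_of_lists, []) flattening above.
from itertools import chain
head_alt = list(chain.from_iterable(econdata.headline.apply(lambda s: s.split())))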
words = [head, text]
titles = ["Headline", "Text"]
for i in range(len(titles)):
plt.figure(figsize=(10,4))
plt.title(titles[i])
nltk.probability.FreqDist(words[i]).plot(100)
# Collocations and Concordance
sas = [nltk.Text(i) for i in words]
for i in range(len(titles)):
print(titles[i])
print('; '.join(sas[i].collocation_list()))
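# The cell title above also mentions concordance; a quick illustrative query
# (the word "economy" is an arbitrary choice, not taken from the original notebook).
sas[1].concordance('economy', lines=5)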
#Dispersion plots
#These dispersion plots reveal information for all our articles combined together. Hence, even though
#the "dispersion" across articles is not insightful in itself, its implication for frequency is
#Bigrams from our collocation analysis also seem to appear at similar points through articles, as needed
for i in range(len(titles)):
print(titles[i])
sas[i].dispersion_plot([word for bigram in sas[i].collocation_list() for word in bigram.split()])
###Output
Headline
###Markdown
Dataset Separation We will build a two-layered model - a classification to predict relevance, and a classification/regression to predict the extent/level of positivity for the relevant articles from the first layer. The relevant splits into train-test sets are made, to proceed to (i) traditional NLP (trigrams, SVD) and (ii) advanced language models (LSTM, BERT)
###Code
econdata['headline'] = econdata['headline'].apply(lambda s: s+"." if s[-1]!="." else s)
econdata['text'] = econdata['headline'] + " " + econdata['text']
econdata['text'] = econdata['text'].apply(lambda s: s+"." if s[-1]!="." else s)
econdata.drop(['_trusted_judgments', 'headline', '_last_judgment_at', 'relevance:confidence'], axis=1, inplace=True)
relX, relY = econdata['text'], econdata['relev']
posdata = econdata[econdata['relev']==1]
posX, posY = posdata["text"], posdata[["positivity", "positivity:confidence"]]
reltrainX, reltestX, reltrainY, reltestY = train_test_split(relX, relY, random_state=1)
postrainX, postestX, postrainY, postestY = train_test_split(posX, posY, random_state=1)
print(reltrainX.shape, reltestX.shape, reltrainY.shape, reltestY.shape)
print(postrainX.shape, postestX.shape, postrainY.shape, postestY.shape)
models = ["relevance", "positivity"]
files = ["trainX", "testX", "trainY", "testY"]
data = [[reltrainX, reltestX, reltrainY, reltestY], [postrainX, postestX, postrainY, postestY]]
for mdl in range(len(models)):
for fl in range(len(files)):
pickle.dump(data[mdl][fl].reset_index(drop=True), open("./data/"+models[mdl]+"_"+files[fl]+".pkl", "wb"))
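# Hedged sketch of the "traditional NLP" first layer mentioned above (trigram
# tf-idf followed by truncated SVD). The parameter values are illustrative
# assumptions, not choices made in this notebook.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vec = TfidfVectorizer(ngram_range=(1, 3), min_df=5)
rel_train_tfidf = tfidf_vec.fit_transform(reltrainX)
svd = decomposition.TruncatedSVD(n_components=100, random_state=1)
rel_train_svd = svd.fit_transform(rel_train_tfidf)
rel_train_svd.shape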
###Output
_____no_output_____ |
.ipynb_checkpoints/LetsWinEGG2-checkpoint.ipynb | ###Markdown
KMeans
###Code
# Applying KMeans on tfidf
# the labels_ give assignment of doc to the cluster number
km = KMeans(n_clusters=nb_clusters)
km.fit(tfidf)
cluster = km.labels_
cluster_partition = [cluster[:limits[0]],cluster[limits[0]:limits[1]],cluster[limits[1]:limits[2]],cluster[limits[2]:limits[3]]]
part_km = []
for i in range(0, len(limits)):
dash = km.fit(partitions_tfidf[i])
part_km.append(dash)
#(part_km[1]).labels_
#
#all_labels = []
#for i in range(0,len(limits)):
# for j in range(0, len((part_km[i]).labels_)):
# doc_clustering is a dictionary
# it looks like -> { doc_number : [partition_number, cluster_number] }
# This is used to reassign doc numbers to their respective partition and cluster
doc_clustering = {}
for i in range(0,len(usable)):
if i < limits[0]:
doc_clustering[i] = [0, cluster[i]]
elif i >= limits[0] and i < limits[1]:
doc_clustering[i] = [1, cluster[i]]
elif i >= limits[1] and i < limits[2]:
doc_clustering[i] = [2, cluster[i]]
else:
doc_clustering[i] = [3, cluster[i]]
# Allows to get the list of document numbers
# return [doc numbers]
# params : partition_number , cluster number
partitions = []
def get_doc(part, clust):
docs = []
for i in range(0,len(doc_clustering)):
if doc_clustering[i][0] == part and doc_clustering[i][1] == clust:
docs.append(i)
return docs
# Get the partitions variable
# Here partitions[part][cluster] = list of doc numbers
partitions = []
for i in range(0, len(limits)):
clusters = []
for j in range(0, nb_clusters):
clusters.append(get_doc(i,j))
partitions.append(clusters)
# example of output for doc_clustering
# doc 465 is in cluster 1 of the partition 1
# doc 154 is in cluster 2 of the partition 0
print(doc_clustering[465])
print(doc_clustering[154])
print()
# Here, just count docs number by cluster.
print(Counter(cluster_partition[0]))
print(Counter(cluster_partition[1]))
print(Counter(cluster_partition[2]))
print(Counter(cluster_partition[3]))
###Output
_____no_output_____
###Markdown
Quality Measure
###Code
# INSERT QUALITY MEASURE HERE
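# One possible quality measure (a sketch; not necessarily the measure the authors
# had in mind): the mean silhouette coefficient of the global KMeans clustering,
# computed on the tf-idf matrix with the labels saved in `cluster` above.
from sklearn.metrics import silhouette_score
print('silhouette:', silhouette_score(tfidf, cluster))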
###Output
_____no_output_____
###Markdown
Chi²
###Code
# tf_of_your_word = tf[numDoc][strWord]
tf = []
for doc in usable:
tf_doc = {}
for word in vectorizer.get_feature_names():
tf_doc[word] = doc.count(word)
tf.append(tf_doc)
# Number total of words
# nb_total_word[numPartition]
nb_total_word = []
nb = 0
for numDoc in range(0, len(usable)):
for word in vectorizer.get_feature_names():
nb += tf[numDoc][word]
if numDoc+1 in limits:
nb_total_word.append(nb)
nb=0
nb_total_word
tf[0]
# nb_word[num_partition][word]
nb_word = []
word_in_this_parti = {}
for word in vectorizer.get_feature_names():
word_in_this_parti[word] = 0
for numDoc in range(0, len(usable)):
for word in vectorizer.get_feature_names():
word_in_this_parti[word] += tf[numDoc][word]
if numDoc+1 in limits:
nb_word.append(word_in_this_parti)
word_in_this_parti = {}
for word in vectorizer.get_feature_names():
word_in_this_parti[word] = 0
len(nb_word)
# nb_word_by_cluster[numPartition][numCluster]
nb_word_by_cluster = []
for parti in partitions:
nb_word_clus = []
for cluster in parti:
nb = 0
for numDoc in cluster:
for word in vectorizer.get_feature_names():
nb += tf[numDoc][word]
nb_word_clus.append(nb)
nb_word_by_cluster.append(nb_word_clus)
# Expected values, if nothing were dependent
# exp[numPartition][numCluster][numWord]
#exp = []
#for numParti in range(0, len(partitions)):
# exp_clus = []
# for numCluster in range(0, len(partitions[numParti])):
# exp_word = []
# for numWord in range(0, vectorizer.get_feature_names()):
# exp_word.append((nb_word[numParti][numWord] + nb_word_by_cluster[numPart][numCluster]) / nb_total_word[numParti])
# exp_cluster.append(exp_word)
# exp.append(exp_clus)
# value_of_khi2 = khi2[numPartition][numCluster][word]
khi2 = []
for numParti in range(0, len(partitions)):
khi2parti = []
for numCluster in range(0, len(partitions[numParti])):
khi2cluster = {}
for word in vectorizer.get_feature_names():
word_in_this_parti[word] = 0
E = nb_word[numParti][word]
            E += nb_word_by_cluster[numParti][numCluster]
E = E/ nb_total_word[numParti]
N = 0
for numDoc in partitions[numParti][numCluster]:
N += tf[numDoc][word]
khi2cluster[word] = (pow(N - E, 2)/E)
khi2parti.append(khi2cluster)
khi2.append(khi2parti)
# list of your labels = labels[numPartition][numCluster]
labels = []
for numPartition in range(0, len(nb_word_by_cluster)):
label_clus = []
for numCluster in range(0, len(nb_word_by_cluster[numPartition])):
label_clus.append(Counter(khi2[numPartition][numCluster]).most_common(5))
labels.append(label_clus)
###Output
_____no_output_____
###Markdown
KMeans & Silhouette Score
###Code
# Applying KMeans on tfidf
# the labels_ give assignment of doc to the cluster number
# SOURCE :
# https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html
# Silhouette analysis can be used to study the separation distance between the resulting clusters.
# The silhouette plot displays a measure of how close each point in one cluster is to points in
# the neighboring clusters and thus provides a way to assess parameters like number of clusters visually.
# This measure has a range of [-1, 1].
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
def kmeans_silhouette(samples, k):
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(random_state = 1 ,n_clusters=k, max_iter=800, init='k-means++', n_init=50, n_jobs=-1)
cluster_labels = clusterer.fit_predict(samples)
silhouette_avg = silhouette_score(samples, cluster_labels)
sample_silhouette_values = silhouette_samples(samples, cluster_labels)
return {'k':k, 'cluster':clusterer, 'labels':cluster_labels ,'silhouette_avg':silhouette_avg, 'sample_silhouette_values':sample_silhouette_values }
def kmeans_silhouette_range(samples, k_min, k_max):
kmeans_silhouette_range_value = {}
for k in range(k_min, k_max):
kmeans_silhouette_range_value[k] = kmeans_silhouette(samples, k)
return kmeans_silhouette_range_value
def display_kmeans_silhouette(kmeans_silhouette_value):
clusterer = kmeans_silhouette_value['cluster']
cluster_labels = kmeans_silhouette_value['labels']
silhouette_avg = kmeans_silhouette_value['silhouette_avg']
sample_silhouette_values = kmeans_silhouette_value['sample_silhouette_values']
n_cluster = kmeans_silhouette_value['k']
print("For n_cluster =", n_cluster,
"The average silhouette_score is :", silhouette_avg)
# Create a subplot with 1 row and 2 columns
fig, (ax1) = plt.subplots(1, 1)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(cluster_labels) + (n_cluster + 1) * 10])
y_lower = 10
for i in range(n_cluster):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_cluster)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.suptitle(("Silhouette analysis for cluster = %d" % n_cluster),
fontsize=14, fontweight='bold')
plt.show()
X = tfidf.toarray()
k_s_r = kmeans_silhouette_range(X, 8, 20)
k_s= kmeans_silhouette(X, 10)
display_kmeans_silhouette(k_s)
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
def display_time_distribution_cluster(labels, labels_string):
label_limit = ['2004-2007', '2007-2010','2010-2013','2013-2016', '2016-2018']
limit_index = 0
zeros = np.zeros((10, 5))
df = pd.DataFrame(zeros, columns=label_limit)
for i in range(len(labels)):
if i <= limits[limit_index]:
df.at[labels[i],label_limit[limit_index]] += 1
else:
limit_index += 1
df.at[labels[i],label_limit[limit_index]] += 1
# To percent
df = df.apply(lambda x: x / x.sum(), axis=1)
# Plot
df.plot.barh(figsize=(20, 30), width=0.9)
plt.title('Time distribution by cluster', fontsize= 24, color='gray')
## PlotSwagg ##
plt.yticks(fontsize=14, rotation=0, color='gray')
plt.xticks(fontsize=14, rotation=0, color='gray')
# Cleanest Percent
plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{:.0%}'.format(x)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{}'.format(" ".join(labels_string[y]))))
# Less border
plt.gca().xaxis.grid(True)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['bottom'].set_visible(False)
plt.gca().spines['left'].set_edgecolor('gray')
plt.gca().spines['right'].set_edgecolor('gray')
# Percent line under the barH
plt.gca().set_axisbelow(True)
plt.show()
# it looks like -> { doc_number : [partition_number, cluster_number] }
doc_clustering = {}
cluster_labels = k_s['labels']
numDoc = 0
for i in range(0, len(limits)):
previousBound = 0
if i > 0:
previousBound = limits[i-1]
clusterer = kmeans_silhouette(partitions_tfidf[i], nb_cluster)["labels"]
for numDocItern in range(0, limits[i]-previousBound):
doc_clustering[numDoc] = [i, clusterer[numDocItern]]
numDoc+=1
len(doc_clustering)
# Allows to get the list of document numbers
# return [doc numbers]
# params : partition_number , cluster number
def get_doc(part, clust):
docs = []
for i in range(0,len(doc_clustering)):
if doc_clustering[i][0] == part and doc_clustering[i][1] == clust:
docs.append(i)
return docs
# Get the partitions variable
# Here partitions[part][cluster] = list of docs numbe
partitions = []
for i in range(0, len(limits)):
clusters = []
for j in range(0, nb_cluster):
clusters.append(get_doc(i,j))
partitions.append(clusters)
partitions
###Output
_____no_output_____
###Markdown
Chi²
###Code
# tf_of_your_word = tf[numDoc][strWord]
tf = []
for doc in usable:
tf_doc = {}
for word in vectorizer.get_feature_names():
tf_doc[word] = doc.count(word)
tf.append(tf_doc)
# Number total of words
# nb_total_word[numPartition]
nb_total_word = []
nb = 0
for numDoc in range(0, len(usable)):
for word in vectorizer.get_feature_names():
nb += tf[numDoc][word]
if numDoc+1 in limits:
nb_total_word.append(nb)
nb=0
nb_total_word
tf[0]
# nb_word[num_partition][word]
nb_word = []
word_in_this_parti = {}
for word in vectorizer.get_feature_names():
word_in_this_parti[word] = 0
for numDoc in range(0, len(usable)):
for word in vectorizer.get_feature_names():
word_in_this_parti[word] += tf[numDoc][word]
if numDoc+1 in limits:
nb_word.append(word_in_this_parti)
word_in_this_parti = {}
for word in vectorizer.get_feature_names():
word_in_this_parti[word] = 0
len(nb_word)
# nb_word_by_cluster[numPartition][numCluster]
nb_word_by_cluster = []
for parti in partitions:
nb_word_clus = []
for cluster in parti:
nb = 0
for numDoc in cluster:
for word in vectorizer.get_feature_names():
nb += tf[numDoc][word]
nb_word_clus.append(nb)
nb_word_by_cluster.append(nb_word_clus)
# value_of_khi2 = khi2[numPartition][numCluster][word]
khi2 = []
for numParti in range(0, len(partitions)):
khi2parti = []
for numCluster in range(0, len(partitions[numParti])):
khi2cluster = {}
for word in vectorizer.get_feature_names():
if nb_word_by_cluster[numParti][numCluster] == 0:
khi2cluster[word] = 0
else:
word_in_this_parti[word] = 0
E = nb_word[numParti][word]
                E += nb_word_by_cluster[numParti][numCluster]
E = E/ nb_total_word[numParti]
N = 0
for numDoc in partitions[numParti][numCluster]:
N += tf[numDoc][word]
khi2cluster[word] = (pow(N - E, 2)/E)
khi2parti.append(khi2cluster)
khi2.append(khi2parti)
# list of your labels = labels[numPartition][numCluster]
labels = []
for numPartition in range(0, len(nb_word_by_cluster)):
label_clus = []
for numCluster in range(0, len(nb_word_by_cluster[numPartition])):
label_clus.append(Counter(khi2[numPartition][numCluster]).most_common(nb_labels))
labels.append(label_clus)
# Some clusters can be empty, so they have a score of 0 on each labels
labels
###Output
_____no_output_____
###Markdown
Diachronic Analysis
###Code
labels_cluster = []
for i in range(nb_cluster):
labels_cluster.append('')
for j in range(len(labels[0][i])):
labels_cluster[i] += ''.join([t for t in labels[0][i][j][0] ]) +' '
labels_cluster
# Low level analysis
display_time_distribution_cluster(k_s['labels'], labels_cluster)
def inter(listA, listB):
return np.intersect1d(listA, listB)
# cluster_t and cluster_s must be in two different partitions
def proba(num_cluster_t, num_cluster_s, num_partition_T, num_partition_S):
total_inter = 0
total_t = 0
for f in range(0, len(labels[num_partition_T][num_cluster_t])):
for f_s in labels[num_partition_S][num_cluster_s]:
if labels[num_partition_T][num_cluster_t][f][0] == f_s[0]:
total_inter += labels[num_partition_T][num_cluster_t][f][1]
break
total_t += labels[num_partition_T][num_cluster_t][f][1]
if total_t == 0:
return 0
return total_inter / total_t
def P_A(num_cluster_s, num_partition_T, num_partition_S):
# first, we have to know what are the cluster which got the label
total = 0
nb_computation = 0
for label_s in labels[num_partition_S][num_cluster_s]:
for num_cluster_t in range(0, len(partitions[num_partition_T])):
if label_s in labels[num_partition_T][num_cluster_t]:
total += proba(num_cluster_t, num_cluster_s, num_partition_T, num_partition_S)
nb_computation += 1
if nb_computation == 0:
return 0
return total / nb_computation
# Define a coefficient for the activity
def activity(num_partition_S, num_partition_T):
res = 0
for num_cluster_s in range(0, len(partitions[num_partition_S])):
res += P_A(num_cluster_s, num_partition_T, num_partition_S)
return res / len(partitions[num_partition_S])
# Standard deviation, but it isn't very useful xD
sigma_t = 0.01
sigma_s = 0.01
# Our Grail
def similar(num_cluster_t, num_partition_T, num_cluster_s, num_partition_S):
cond1 = proba(num_cluster_t, num_cluster_s, num_partition_T, num_partition_S) > P_A(num_cluster_s, num_partition_T, num_partition_S)
cond2 = proba(num_cluster_t, num_cluster_s, num_partition_T, num_partition_S) > activity(num_partition_S, num_partition_T) + sigma_s
cond3 = proba(num_cluster_t, num_cluster_s, num_partition_T, num_partition_S) > P_A(num_cluster_s, num_partition_T, num_partition_S)
cond4 = proba(num_cluster_t, num_cluster_s, num_partition_T, num_partition_S) > activity(num_partition_T, num_partition_S) + sigma_t
return cond1 and cond2 and cond3 and cond4
# Plot the diachronic analysis results
# Node coloring source: https://stackoverflow.com/questions/13517614/draw-different-color-for-nodes-in-networkx-based-on-their-node-value#13517947
# The higher the partition index, the more recent the partition
val_map = {}
values = []
g = nx.Graph() #nx.DiGraph(directed=True)
for n_part in range(0, len(limits)):
for n_clus in range(0, nb_cluster):
node_str = '('+str(n_clus)+','+str(n_part)+')'
g.add_node(node_str, posxy=(n_part, n_clus), partition = n_part)
val_map[node_str] = n_part/len(limits)
values = [val_map.get(node, 0.25) for node in g.nodes()]
for numParti in range(0, len(partitions)-1):
for num_cluster_t in range(0, nb_cluster):
for num_cluster_s in range(0, nb_cluster):
if similar(num_cluster_t, numParti, num_cluster_s, numParti+1):
#print("("+str(num_cluster_t)+","+str(numParti)+") est similaire à ("+str(num_cluster_s)+","+str(numParti+1)+")")
g.add_edges_from([("("+str(num_cluster_t)+","+str(numParti)+")", "("+str(num_cluster_s)+","+str(numParti+1)+")")])
positions = nx.get_node_attributes(g,'posxy')
options = {
'node_color': 'blue',
'node_size': 100,
'width': 3,
'arrowstyle': '-|>',
'arrowsize': 12,
}
nx.draw(g, positions, node_size=3000/nb_cluster, cmap=plt.get_cmap('jet'), node_color=values, arrowsize=20, arrows=True, arrowstyle="-|>")
pos = nx.circular_layout(g)
print(labels[0][1])
labels[0][1][0][1]
###Output
[('model', 4794.631419922505), ('modele', 2206.226925540483), ('prediction', 1952.437198413677), ('temporel', 1092.3023669530028), ('present', 916.0246783494716), ('graph', 755.2493974505952), ('resultat', 755.2493974505952), ('pouvoir', 480.2060587668072), ('commun', 480.2060587668072), ('decision', 480.2060587668072), ('cluster', 480.2060587668072), ('concept', 480.2060587668072), ('serie', 365.93800098189547), ('base', 365.93800098189547), ('article', 365.93800098189547)]
|
data/Data Processing/DealingWithTheData.ipynb | ###Markdown
Getting a sense of what the datasets contain
###Code
airports.columns
airports.usage.unique()
airports['size'].unique()
ports.columns
ports.harborsize.unique()
ports.fillna('None').railway.unique()
ports.railway.unique()
countries.columns
globalData.columns
globalData.loc[0]
print('Maximum:',globalData.all_commodities_export.max(),'\nMinimum:',globalData.all_commodities_export.min())
plt.hist(globalData.all_commodities_export)
plt.show()
plt.hist(np.log(globalData.all_commodities_export))
plt.show()
print('Maximum:',globalData.normalised_export_2017.max(),'\nMinimum:',globalData.normalised_export_2017.min())
print('Maximum:',globalData.all_commodities_import.max(),'\nMinimum:',globalData.all_commodities_import.min())
plt.hist(globalData.all_commodities_import)
plt.show()
print('Maximum:',globalData.normalised_import_2017.max(),'\nMinimum:',globalData.normalised_import_2017.min())
plt.hist(np.log(1+globalData.all_commodities_import))
plt.show()
print('Maximum:',globalData.passengers_2017.max(),'\nMinimum:',globalData.passengers_2017.min())
plt.hist(globalData.passengers_2017)
plt.show()
plt.hist(np.log(1+globalData.passengers_2017))
plt.show()
print('Maximum:',globalData.normalised_passengers_2017.max(),'\nMinimum:',globalData.normalised_passengers_2017.min())
print('Maximum:',globalData.freight_2017.max(),'\nMinimum:',globalData.freight_2017.min())
plt.hist(globalData.freight_2017)
plt.show()
plt.hist(np.log(1+globalData.freight_2017))
plt.show()
print('Maximum:',globalData.normalised_freight_2017.max(),'\nMinimum:',globalData.normalised_freight_2017.min())
###Output
Maximum: 10.635676343672985
Minimum: 0.0
###Markdown
Let's write some CSVs! First, the top importers/exporters
###Code
globalData
top_five_importers = globalData.sort_values(by=['all_commodities_import'],ascending=False)[['all_commodities_import','name','iso3']]
top_five_importers = top_five_importers.reset_index(drop=True).loc[0:4]
top_five_importers = top_five_importers[['name','all_commodities_import','iso3']]
top_five_importers['all_commodities_import'] = top_five_importers['all_commodities_import'].astype(int)
top_five_importers.columns = ['name','value','code']
top_five_importers['name'] = cleanName(list(top_five_importers.name))
top_five_importers
top_five_exporters = globalData.sort_values(by=['all_commodities_export'],ascending=False)[['all_commodities_export','name','iso3']]
top_five_exporters = top_five_exporters.reset_index(drop=True).loc[0:4]
top_five_exporters = top_five_exporters[['name','all_commodities_export','iso3']]
top_five_exporters['all_commodities_export'] = (top_five_exporters['all_commodities_export']).astype(int)
top_five_exporters.columns = ['name','value','code']
top_five_exporters['name'] = cleanName(list(top_five_exporters.name))
top_five_exporters
top_five_importers.to_csv(basePath+'Layers/StylingDataDriven/top_five_importers.csv',header=True,index=False)
top_five_exporters.to_csv(basePath+'Layers/StylingDataDriven/top_five_exporters.csv',header=True,index=False)
###Output
_____no_output_____
###Markdown
Now the top 5 busiest airports
###Code
airports.columns
airports.head()
top_five_airports = airports[airports.busiest_airport_ranking!=0].sort_values(by=['busiest_airport_ranking'],ascending=True)\
[['airport_name','iata_code','amount_passed_through','busiest_airport_ranking']]
top_five_airports = top_five_airports.reset_index(drop=True).loc[0:4]
top_five_airports = top_five_airports[['airport_name','amount_passed_through','iata_code']]
top_five_airports.columns = ['name','value','code']
top_five_airports['value'] = top_five_airports['value'].round(2)
top_five_airports['name'] = cleanName(list(top_five_airports.name),"Int'l")
top_five_airports
top_five_airports.to_csv(basePath+'Layers/StylingDataDriven/top_five_airports.csv',header=True,index=False)
###Output
_____no_output_____
###Markdown
On to the busiest ports
###Code
ports.columns
top_five_ports = ports[ports.busiest_ports_ranking!=0].sort_values(by=['busiest_ports_ranking'],ascending=True)\
[['port_name','amount_shipped_through','busiest_ports_ranking']]
top_five_ports = top_five_ports.reset_index(drop=True).loc[0:4]
top_five_ports = top_five_ports[['port_name','amount_shipped_through','port_name']]
top_five_ports.columns = ['name','value','code']
top_five_ports['name'] = cleanName(list(top_five_ports.name))
top_five_ports
top_five_ports.to_csv(basePath+'Layers/StylingDataDriven/top_five_ports.csv',header=True,index=False)
###Output
_____no_output_____
###Markdown
UK airports
###Code
ukairports.head()
ukairports.columns
top_five_ukairports = ukairports.sort_values(by=['total_freight'],ascending=False)[['airport_name','total_freight','iata_code']]
top_five_ukairports = top_five_ukairports.reset_index(drop=True).loc[0:4]
top_five_ukairports.columns = ['name','value','code']
top_five_ukairports['value'] = top_five_ukairports['value'].round(2)
top_five_ukairports['name'] = cleanName(list(top_five_ukairports.name),"Int'l")
top_five_ukairports
top_five_ukairports.to_csv(basePath+'Layers/StylingDataDriven/top_five_ukairports.csv',header=True,index=False)
###Output
_____no_output_____
###Markdown
Finally, the UK ports
###Code
ukports.head()
ukports.columns
top_five_ukports = ukports.sort_values(by=['Total'],ascending=False)[['Total','port_name','port_name']]
top_five_ukports.index=np.arange(top_five_ukports.shape[0])
top_five_ukports = top_five_ukports.loc[0:4]
top_five_ukports.columns = ['Total','port_name_1','port_name_2']
top_five_ukports = top_five_ukports[['port_name_1','Total','port_name_2']]
top_five_ukports.columns = ['name','value','code']
top_five_ukports['value'] = top_five_ukports['value'].round(2)
top_five_ukports['name'] = cleanName(list(top_five_ukports.name))
top_five_ukports
top_five_ukports.to_csv(basePath+'Layers/StylingDataDriven/top_five_ukports.csv',header=True,index=False)
# Get the top-five trading partners of the UK (exports)
exportPartners, theirShare = getTradingPartners('United Kingdom','2015','export',basePath=basePath)
exportPartners
theirShare
topfiveTradingPartnersExport = pd.DataFrame(index=np.arange(5),columns=['name','value','code'])
topfiveTradingPartnersExport['value'] = [partner[1] for partner in exportPartners]
topfiveTradingPartnersExport['value'] = topfiveTradingPartnersExport['value'].round(2)
topfiveTradingPartnersExport['name'] = [partner[0] for partner in exportPartners]
topfiveTradingPartnersExport['code'] = [[iso3 for iso3,name in zip(countries.GU_A3,countries.NAME)\
if partner[0] in name.lower()][0] for partner in exportPartners]
topfiveTradingPartnersExport['name'] = cleanName(list(topfiveTradingPartnersExport.name))
topfiveTradingPartnersExport
topfiveTradingPartnersExport.to_csv('../Layers/StylingDataDriven/top_five_exporters_uk.csv',header=True,index=True)
# Get the top-five trading partners of the UK (imports)
importPartners, theirShare_I = getTradingPartners('United Kingdom','2015','import',basePath=basePath)
importPartners
theirShare_I
topfiveTradingPartnersImport = pd.DataFrame(index=np.arange(5),columns=['name','value','code'])
topfiveTradingPartnersImport['value'] = [partner[1] for partner in importPartners]
topfiveTradingPartnersImport['value'] = topfiveTradingPartnersImport['value'].round(2)
topfiveTradingPartnersImport['name'] = [partner[0] for partner in importPartners]
topfiveTradingPartnersImport['code'] = [[iso3 for iso3,name in zip(countries.GU_A3,countries.NAME)\
if partner[0] in name.lower()][0] for partner in importPartners]
topfiveTradingPartnersImport['name'] = cleanName(list(topfiveTradingPartnersImport.name))
topfiveTradingPartnersImport
topfiveTradingPartnersImport.to_csv('../Layers/StylingDataDriven/top_five_importers_uk.csv',header=True,index=True)
###Output
_____no_output_____ |
custom-loss/Training_Models_with_Unequal_Economic_Error_Costs_in_SageMaker.ipynb | ###Markdown
Training Models with Unequal Economic Error Costs in Amazon SageMaker*** Table of Contents1. [Introduction](introduction)1. [Background and Solution Overview](background) 1. [Importing Libraries](libraries) 1. [Defining Helper Functions](helpers) 1. [Importing Data](import_data) 1. [Preparing_Data](process_data)1. [Defining a Custom Loss Function](custom_loss)1. [Training the Model](train)1. [Building the Docker Image](docker_image)1. [Executing the Amazon SageMaker Training Job](sagemaker_train)1. [Making Predictions](predictions)1. [Analyzing the Results](score)1. [Conclusion](conclusion) The blog post associated with this notebook is located [here](https://aws.amazon.com/blogs/machine-learning/training-models-with-unequal-economic-error-costs-using-amazon-sagemaker/). Introduction Many companies are turning to machine learning (ML) to improve customer and business outcomes. They use the power of ML models built over “big data” to identify patterns and find correlations. Then they can identify appropriate approaches or predict likely outcomes based on data about new instances. However, as ML models are approximations of the real world, some of these predictions will likely be in error. In some applications all types of prediction errors are truly equal in impact. In other applications, one kind of error can be much more costly or consequential than another – measured in absolute or relative terms, in dollars, time, or something else. For example, predicting someone does not have breast cancer when they do (a false negative error) will, according to medical estimates, likely have much greater cost or consequences than the reverse error. We may even be willing to tolerate more false positive errors if we sufficiently reduce the false negatives to compensate. In this blog post, we address applications with unequal error costs with the goal of reducing undesirable errors while providing greater transparency to the trade-offs being made. We show you how to train a model in Amazon SageMaker for a binary classification problem in which the costs of different kinds of misclassification are very different. To explore this tradeoff, we show you how to write a custom loss function – the metric that evaluates how well a model makes predictions – that incorporates asymmetric misclassification costs. We then show you how to train an Amazon SageMaker Build Your Own Model using that loss function. Further, we show how to evaluate the errors made by the model and how to compare models trained with different relative costs so that you can identify the model with the best economic outcome overall.The advantage of this approach is that it makes an explicit link between an ML model’s outcomes and errors and the business’ framework for decision-making. This approach requires the business to explicitly state its cost matrix, based on the specific actions to be taken on the predictions. The business can then evaluate the economic consequences of the model predictions on their overall processes, the actions taken based on the predictions, and their associated costs. This evaluation process moves well beyond simply assessing the classification results of the model. This approach can drive challenging discussions in the business, and force differing implicit decisions and valuations onto the table for open discussion and agreement. Background and Solution Overview Although model training always aims to minimize errors, most models are trained to assume that all types of errors are equal. 
However, what if we know that the costs of different types of errors are not equal? For example, let's take a sample model trained on [UCI'S breast cancer diagnostic data set](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29).[1](note_one) Clearly, a false positive prediction (predicting this person has breast cancer, when they do not) has very different consequences than a false negative prediction (predicting this person does not have breast cancer, when they do). In the first case, the consequence is an extra round of screening. In the second case, the cancer might be at a more advanced stage before it's discovered. To quantify these consequences they are often discussed in terms of their relative cost, which then allows trade-offs to be made. While we can debate what the exact costs of a false negative or a false positive prediction should be, we believe we’d all agree that they're not the same - although ML models are generally trained as if they are. We can use a custom cost function to evaluate a model and see the economic impact of the errors the model is making (utility analysis). Elkan[2](note_two) showed that applying a cost function to the results of a model can be used to compensate for imbalanced samples when used in standard Bayesian and decision tree learning methods (for example: few loan defaults, versus a large sample of repaid loans). The custom function can also be used to perform this same compensation.We can also have the model “shift” its predictions in a fashion that reflects the difference in cost, by providing the costs of different types of errors to the model during training using a custom loss function. So, for example, in the breast cancer example we'd like the model to make fewer false negative errors and are willing to accept more false positives to achieve that goal. We may even be willing to give up some “correct” predictions in order to have fewer false negatives. At least, we'd like to understand the trade-offs we can make here. In our example, we'll use costs from the healthcare industry.[3](note_three),[4](note_four) In addition, we'd like to understand in how many cases the model's predictions are “almost” predicted as something else. For example, binary models use a cutoff (say, 0.5) to classify a score as “True” or "False." How many of our cases were in fact very close to the cut-off? Are our false negatives classified that way because their score was 0.499999? These details can’t be seen in the usual representations of confusion matrices or AUC measures. To help address these questions, we have developed a novel, graphical representation of the model predictions that allows us to examine these details, without depending on a specific threshold.In fact, there are likely cases where a model trained to avoid specific types of errors would begin to specialize in differentiating errors. Imagine a neural network that's been trained to believe that all misrecognitions of street signs are equal.[5](note_five) Now, imagine a neural network that's been trained that misrecognizing a stop sign as a sign for speed limit 45 mph is a far worse error than confusing two speed limit signs. It's reasonable to expect that the neural network would begin to recognize different features. We believe this is a promising research direction. We use Amazon SageMaker to build and host our model. 
Amazon SageMaker is a fully-managed platform that enables developers and data scientists to quickly and easily build, train, and deploy machine learning models at any scale. We author and analyze the model in a Jupyter notebook hosted on an Amazon SageMaker notebook instance, then build and deploy an endpoint for online predictions, using its “Bring Your Own Model” capability.Note that while the terms "cost function" and "loss function" are often used interchangeably, we differentiate between them in this post, and provide examples of each:* We use a "loss function" to train the model. Here, we specify the different weights of different kinds of errors. The relative weight of the errors is of most importance here.* We use a "cost function" to evaluate the economic impact of the model. For the cost function, we can specify the cost (or value) of correct predictions, as well as the cost of errors. Here, dollar costs are most appropriately used.This distinction allows us to further refine the model's behavior or to reflect differing influences from different constituencies. Although in this model we'll use the same set of costs (quality adjusted life years, QALY) for both functions, you could, for example, use relative QALY for the loss function, and costs of providing care for the cost function.We’ll break up this problem into three parts:1. In "Defining a custom loss function," we show how to build a custom loss function that weights different errors unequally. The relative costs of the prediction errors are provided as hyperparameters at runtime, allowing the effects of different weightings on the model to be explored and compared. We build and demonstrate the use of a custom cost function to evaluate our “vanilla” model, which is trained to assume that all errors are equal.2. In “Training the model,” we demonstrate how to train a model by using the custom loss function. We emulate and extend a [sample notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/introduction_to_applying_machine_learning/breast_cancer_prediction), which uses the UCI breast cancer diagnostic data set.3. In “Analyzing the results,” we show how we can compare the models to better understand the distribution of predictions as compared to our threshold. We'll see that by training the model to avoid certain kinds of errors, we'll affect the distributions so that the model differentiates more effectively between its positive and negative predictions.We are building our own model and not using one of the Amazon SageMaker built-in algorithms. This means that we can make use of the Amazon SageMaker ability to train any custom model as long as it’s packaged in a Docker container with the image of that container available in Amazon Elastic Container Registry (Amazon ECR). For details on how to train a custom model on Amazon SageMaker, see [this post](https://aws.amazon.com/blogs/machine-learning/train-and-host-scikit-learn-models-in-amazon-sagemaker-by-building-a-scikit-docker-container/) or the various [sample notebooks available](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality).Bibliography:1. Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.2. Elkan, Charles. “The Foundations of Cost-Sensitive Learning.” In International Joint Conference on Artificial Intelligence, 17:973–978. Lawrence Erlbaum Associates Ltd, 2001.3. 
Wu, Yirong, Craig K. Abbey, Xianqiao Chen, Jie Liu, David C. Page, Oguzhan Alagoz, Peggy Peissig, Adedayo A. Onitilo, and Elizabeth S. Burnside. “Developing a Utility Decision Framework to Evaluate Predictive Models in Breast Cancer Risk Estimation.” Journal of Medical Imaging 2, no. 4 (October 2015). https://doi.org/10.1117/1.JMI.2.4.041005.4. Abbey, Craig K., Yirong Wu, Elizabeth S. Burnside, Adam Wunderlich, Frank W. Samuelson, and John M. Boone. “A Utility/Cost Analysis of Breast Cancer Risk Prediction Algorithms.” Proceedings of SPIE--the International Society for Optical Engineering 9787 (February 27, 2016). 5. Eykholt, Kevin, Ivan Evtimov, Earlence Fernandes, Bo Li, Amir Rahmati, Chaowei Xiao, Atul Prakash, Tadayoshi Kohno, and Dawn Song. “Robust Physical-World Attacks on Deep Learning Models.” ArXiv:1707.08945 [Cs], July 27, 2017. http://arxiv.org/abs/1707.08945. Setup To set up the environment necessary to run this example in your own AWS account, first follow Steps 0 and 1 in this [previously published blog post](https://aws.amazon.com/blogs/machine-learning/simulate-quantum-systems-on-amazon-sagemaker/) to set up an Amazon SageMaker instance and add the AmazonEC2ContainerRegistryFullAccess policy to the SageMakerExecutionRole. Then, as in Step 2, open a terminal to clone our Git repo into your Amazon SageMaker notebook instance.The repo contains a directory named "container" that has all the components necessary to build and use a Docker image of the algorithm we run in this blog post. You can find more information on the individual components in [this Amazon SageMaker sample notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb). For our purposes, there are two files that are most relevant and contain all the information to run our workload.1. Dockerfile. This file describes how to build your Docker container image. Here you can define the dependencies of your code, for example, which language you are using (Python), what packages your code needs (for example, TensorFlow), and so on. More details can be found [here](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb).1. custom_loss/train. This file is executed when Amazon SageMaker runs the container for training. It contains the Python code that defines the binary classifier model, the custom loss function used to train the model and Keras training job. We describe this code in more detail below. Importing Libraries
###Code
import keras
import io
import os
import time
import sagemaker.amazon.common as smac
import sagemaker as sage
import boto3 as aws
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, roc_curve, roc_auc_score, precision_recall_fscore_support, accuracy_score
import matplotlib.pyplot as plt
from IPython.core.display import display, HTML
%matplotlib inline
role = sage.get_execution_role()
###Output
_____no_output_____
###Markdown
The S3 folder that will contain both the training data and model output is called 'custom-loss-sagemaker' here. You will need to create your own S3 bucket with a globally unique name. Create a bucket with a name like 'custom-loss-sagemaker' + your name. Once that bucket is created, update the bucket variable below.
###Code
bucket = 'custom-loss-sagemaker' # UPDATE THIS
prefix = 'custom-loss-blog-post'
###Output
_____no_output_____
###Markdown
This is the name that will be associated with the Docker image created in this notebook.
###Code
image_name = 'custom-loss'
###Output
_____no_output_____
###Markdown
This cutoff (aka threshold) will be used to construct the confusion matrices and to color the distribution plots in the "Analyzing the Results" section of this notebook.
###Code
cutoff = 0.5
###Output
_____no_output_____
###Markdown
Defining Helper Functions

In this section, we define several helper functions that calculate various metrics used to judge the quality of the model. See the top of each function for a detailed description of its purpose.
###Code
def custom_pred_distro(positives, negatives, cutoff=0.5, title=None):
'''This function generates distributions of predicted scores for actual positives and actual negatives.
Note that the cutoff argument only affects the coloring of the graphs. It does NOT affect any model
results or predicted values.'''
fig, axes = plt.subplots(2,1, figsize=(10,8))
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[0].set(xlim=[0,1], xticks=np.arange(0, 1, step=0.1), xlabel='Model Score', ylabel='Count', title='Actual Negatives')
axes[0].hist(negatives[negatives>cutoff], color='C1', label='False Positives', bins=30)
axes[0].hist(negatives[negatives<=cutoff], label='True Negatives', bins=30)
axes[0].legend()
axes[1].spines['top'].set_visible(False)
axes[1].spines['right'].set_visible(False)
axes[1].set(xlim=[0,1], xticks=np.arange(0, 1, step=0.1), xlabel='Model Score', ylabel='Count', title='Actual Positives')
axes[1].hist(positives[positives>cutoff], label='True Positives', bins=30)
axes[1].hist(positives[positives<=cutoff], label='False Negatives', bins=30)
axes[1].legend()
if title is not None:
fig.suptitle(title, fontsize=16, fontweight='bold', x=0.52)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
else:
plt.tight_layout()
return None
def expected_value(positives, negatives, v_tn, v_fp, v_fn, v_tp, cutoff=0.5):
'''This function calculates the expected value of the next test example. To do this, the function must calculate
the prob of classification/misclassification errors and that requires using a cutoff value to make discrete predictions.'''
tp = (positives > cutoff).sum()
fn = (positives <= cutoff).sum()
tn = (negatives < cutoff).sum()
fp = (negatives >= cutoff).sum()
pos = fn + tp
neg = fp + tn
total = tn + fp + fn + tp
fpr = fp/neg
fnr = fn/pos
n = neg/total
p = pos/total
#given truth is negative
ev_n = v_tn * (1-fpr) + v_fp * (fpr)
#given truth is positive
ev_p = v_tp * (1-fnr) + v_fn * (fnr)
#total expected value
ev = ev_n * n + ev_p * p
output = {
'fpr': fpr,
'fnr': fnr,
'n': n,
'p': p,
'ev_n': ev_n,
'ev_p': ev_p,
'ev': ev
}
return output
def conf_matrix(positives, negatives, cutoff, title):
'''This function draws a confusion matrix, using our cutoff.'''
tp = (positives > cutoff).sum()
fn = (positives <= cutoff).sum()
tn = (negatives < cutoff).sum()
fp = (negatives >= cutoff).sum()
output = {
'Predicted Negatives': [tn, fn],
'Predicted Positives': [fp, tp]
}
df_table = pd.DataFrame(output, index=['Actual Negatives', 'Actual Positives'])
display(HTML('<b>' + title + '</b>'))
display(HTML(df_table.to_html()))
###Output
_____no_output_____
###Markdown
Importing the Data

Here we import the breast cancer screening data used to construct our model.
###Code
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data', header = None)
# specify columns extracted from wbdc.names
data.columns = ["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean",
"compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean",
"radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se",
"concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst",
"perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst",
"concave points_worst","symmetry_worst","fractal_dimension_worst"]
# save the data
data.to_csv("data.csv", sep=',', index=False)
# print the shape of the data file
print(data.shape)
# show the top few rows
display(data.head())
# describe the data object
display(data.describe())
# we will also summarize the categorical field diagnosis
display(data.diagnosis.value_counts())
###Output
(569, 32)
###Markdown
Preparing the Data

We split the data into training and test sets.
###Code
X = data.drop('id', axis=1)
y = X.pop('diagnosis')
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
###Output
_____no_output_____
###Markdown
We standardize the data since variance differs drastically across features and map the target variable to numeric values.
###Code
scaler = StandardScaler().fit(x_train)
x_train_scld = scaler.transform(x_train)
x_test_scld = scaler.transform(x_test)
label_map = {'M': 1, 'B': 0 }
y_train = y_train.map(label_map)
y_test = y_test.map(label_map)
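# Assemble the training matrix with the label (diagnosis) in the first column, followed by the scaled features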
training_data = np.concatenate((y_train.values.reshape((426,1)), x_train_scld), axis=1)
###Output
_____no_output_____
###Markdown
Since Amazon SageMaker expects the training data to be read from an S3 bucket, we save the training data to the S3 bucket defined at the top of this notebook.
###Code
#first save the training data as a csv locally
training_data_name = 'training_data.csv'
np.savetxt(training_data_name, training_data, delimiter=',')
#then load data into S3
aws.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', training_data_name))\
.upload_file(training_data_name)
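# The training file is now at s3://<bucket>/<prefix>/train/training_data.csv, which matches the 'train' channel passed to the estimators' fit() calls below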
###Output
_____no_output_____
###Markdown
Defining a Custom Loss Function

We now construct a loss function that weighs false positive errors differently from false negative ones. To do this, we build a binary classifier in Keras to use Keras' ability to accommodate user-defined loss functions.

To build a loss function in Keras, we define a Python function that takes model predictions and ground-truth as arguments and returns a scalar value. In the custom function, we input the cost associated with a false negative error (fn_cost) and with a false positive error (fp_cost). Note that internally the loss function must use Keras backend functions to perform any calculations.

The following function defines the loss of a single prediction as the difference between the prediction's ground-truth class and the predicted value, weighted by the cost associated with misclassifying an observation from that ground-truth class. The total loss is the unweighted average of all of these losses. This is a relatively simple loss function, but building upon this foundation, more complex, situation-specific benefit and cost structures can be constructed and used to train models.

```
def custom_loss_wrapper(fn_cost=1, fp_cost=1):
    def custom_loss(y_true, y_pred, fn_cost=fn_cost, fp_cost=fp_cost):
        h = K.ones_like(y_pred)
        fn_value = fn_cost * h
        fp_value = fp_cost * h
        weighted_values = y_true * K.abs(1-y_pred)*fn_value + (1-y_true) * K.abs(y_pred)*fp_value
        loss = K.mean(weighted_values)
        return loss
    return custom_loss
```

Training the Model

Since we are using Amazon SageMaker to train a custom model, all of the code related to building and training the model is located in a Docker container image stored in Amazon ECR. The code shown here is an example of the code contained in the Docker container image.

The files containing the actual model code (and custom loss function, mirroring the copy shown earlier) as well as all the files necessary to create the Docker container image and push it to Amazon ECR are located in the [repository associated with this blog post](https://github.com/aws-samples/amazon-sagemaker-custom-loss-function). We construct and train three models so we can compare the predictions of models trained with Keras' built-in loss function as well as with our custom loss function. We use a binary classification model that predicts the probability that a tumor is malignant.

The three models are:
1. A binary classification model that uses Keras' built-in binary cross-entropy loss with equal weights for false negative and false positive errors.
2. A binary classification model that uses the custom loss function defined previously with false negatives weighted 5 times as heavily as false positives.
3. A binary classification model that uses the custom loss function defined previously with false negatives weighted 200 times as heavily as false positives.

The costs used in the last model's loss function are based upon the medical literature.[3](note_three),[4](note_four) The costs of screening are measured in QALYs. One QALY is defined as one year of life in full health (1 year x 1.0 health). For example, if an individual is at half health, that is, 0.5 of full health, then one year of life for that individual is equal to 0.5 QALYs (1 year x 0.5 health). Two years of life for that individual is worth 1 QALY (2 years x 0.5 health).
| Outcome | QALY |
|----|---|
| True Negative | 0 |
| False Positive | -0.01288 |
| True Positive | -0.3528 |
| False Negative | -2.52 |

Here, a true negative outcome is measured as the baseline of costs, that is, all other costs in the table are measured relative to a patient without breast cancer that tests negative for breast cancer. A woman with breast cancer that tests negative loses 2.52 QALYs relative to the baseline, and a woman without breast cancer that tests positive loses 0.0128767 QALYs (or about 4.7 days) relative to the baseline. A QALY has an estimated economic value of $100,000 USD, so these values can also be translated into dollar costs by multiplying the cost in QALYs by 100,000 USD. Given these values, a false negative error is about 200 times more costly than a false positive one. See the medical literature referenced in the introduction for more detail surrounding these costs.

The middle model value of 5 was chosen for demonstration purposes. With these costs in hand, we can now estimate the model. Estimating the parameters of a model in Keras is a three-step process:
1. Defining the model.
2. Compiling the model.
3. Training the model.

Defining the Model Architecture

First, we define the structure of the model. In this case, the model consists of a single node in a single layer. That is, for each model that follows, we add a single Dense layer with a single unit that takes a linear combination of features and passes that linear combination to a sigmoid function that outputs a value between 0 and 1. Again, the actual executable version of the code is in the Docker container, but it is shown here for illustrative purposes. We'll provide the relative weights in a later step.

```
# The 'built-in' model is trained on Keras' built-in binary cross-entropy loss function.
model_builtin = Sequential()
model_builtin.add(Dense(units=num_classes, input_dim=input_dim, activation='sigmoid'))

# The 'custom' model is trained on our custom loss function that weighs false negatives 5 times more heavily than false positives.
model_five = Sequential()
model_five.add(Dense(units=num_classes, input_dim=input_dim, activation='sigmoid'))

# The 'medical' model is trained on our custom loss function with false negative and false positive weights derived from the medical literature.
model_medical = Sequential()
model_medical.add(Dense(units=num_classes, input_dim=input_dim, activation='sigmoid'))
```

Compiling the Model

Next, let's compile the models. Compiling a model refers to configuring the learning process. We need to specify the optimization algorithm and the loss function that we will use to train the model. This is the step in which we incorporate our custom loss function and relative weights into the model training process.

```
# Trained using the built-in loss function
model_builtin.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])

# Trained using the custom loss function with false negatives weighted 5 times more heavily than false positives
custom_loss_five = custom_loss_wrapper(fn_cost=5, fp_cost=1)
model_five.compile(loss=custom_loss_five, optimizer='sgd', metrics=['accuracy'])

# Trained using the custom loss function with false negatives weighted about 200 times more heavily than false positives
custom_loss_medical = custom_loss_wrapper(fn_cost=200, fp_cost=1)
model_medical.compile(loss=custom_loss_medical, optimizer='sgd', metrics=['accuracy'])
```

Training the Model

Now we're ready to train the models. To do this, we call the fit method and provide the training data, number of epochs, and batch size.
Whether you use a built-in or a custom loss function, the code is the same in this step.

```
model_builtin.fit(train_x, train_y, epochs=50, batch_size=32, verbose=0)
model_five.fit(train_x, train_y, epochs=50, batch_size=32, verbose=0)
model_medical.fit(train_x, train_y, epochs=50, batch_size=32, verbose=0)
```

Building the Docker Image

By executing the line of code below, we build the Docker image that contains the custom loss function and model code and push the image to Amazon Elastic Container Registry (Amazon ECR). The "image_name" defined at the top of this notebook is the name that will be assigned to the repository in ECR that contains this image.
###Code
! ./build_and_push.sh {image_name}
###Output
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
Login Succeeded
Sending build context to Docker daemon 2.803MB
Step 1/8 : FROM tensorflow/tensorflow:1.9.0
1.9.0: Pulling from tensorflow/tensorflow
Digest: sha256:92ad7f5da1f0e7c2c7b714b77b12424ae3d7971510d8ff8673b8b0695c3fd1c9
Status: Downloaded newer image for tensorflow/tensorflow:1.9.0
---> caab7ec02690
Step 2/8 : MAINTAINER Scott Gregoire <[email protected]>
---> Using cache
---> 93b5c47e577b
Step 3/8 : RUN pip install keras==2.2.0 h5py==2.8.0 pandas==0.22.0
---> Running in 21d4fd23fbe8
Collecting keras==2.2.0
Downloading https://files.pythonhosted.org/packages/68/12/4cabc5c01451eb3b413d19ea151f36e33026fc0efb932bf51bcaf54acbf5/Keras-2.2.0-py2.py3-none-any.whl (300kB)
Requirement already satisfied: h5py==2.8.0 in /usr/local/lib/python2.7/dist-packages (2.8.0)
Collecting pandas==0.22.0
Downloading https://files.pythonhosted.org/packages/6b/b5/76538d8a202f8c368d30c18892d33664d1a3b2c078af8513ee5b5d172629/pandas-0.22.0-cp27-cp27mu-manylinux1_x86_64.whl (24.3MB)
Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python2.7/dist-packages (from keras==2.2.0) (1.1.0)
Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python2.7/dist-packages (from keras==2.2.0) (1.14.5)
Collecting keras-preprocessing==1.0.1 (from keras==2.2.0)
Downloading https://files.pythonhosted.org/packages/f8/33/275506afe1d96b221f66f95adba94d1b73f6b6087cfb6132a5655b6fe338/Keras_Preprocessing-1.0.1-py2.py3-none-any.whl
Collecting keras-applications==1.0.2 (from keras==2.2.0)
Downloading https://files.pythonhosted.org/packages/e2/60/c557075e586e968d7a9c314aa38c236b37cb3ee6b37e8d57152b1a5e0b47/Keras_Applications-1.0.2-py2.py3-none-any.whl (43kB)
Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python2.7/dist-packages (from keras==2.2.0) (1.11.0)
Collecting pyyaml (from keras==2.2.0)
Downloading https://files.pythonhosted.org/packages/9e/a3/1d13970c3f36777c583f136c136f804d70f500168edc1edea6daa7200769/PyYAML-3.13.tar.gz (270kB)
Requirement already satisfied: python-dateutil in /usr/local/lib/python2.7/dist-packages (from pandas==0.22.0) (2.7.3)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python2.7/dist-packages (from pandas==0.22.0) (2018.5)
Building wheels for collected packages: pyyaml
Running setup.py bdist_wheel for pyyaml: started
Running setup.py bdist_wheel for pyyaml: finished with status 'done'
Stored in directory: /root/.cache/pip/wheels/ad/da/0c/74eb680767247273e2cf2723482cb9c924fe70af57c334513f
Successfully built pyyaml
Installing collected packages: keras-preprocessing, keras-applications, pyyaml, keras, pandas
Found existing installation: pandas 0.23.3
Uninstalling pandas-0.23.3:
Successfully uninstalled pandas-0.23.3
Successfully installed keras-2.2.0 keras-applications-1.0.2 keras-preprocessing-1.0.1 pandas-0.22.0 pyyaml-3.13
[91mYou are using pip version 10.0.1, however version 18.0 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
[0mRemoving intermediate container 21d4fd23fbe8
---> 9d75c54e69f0
Step 4/8 : ENV PYTHONUNBUFFERED=TRUE
---> Running in f932dbcd9e1c
Removing intermediate container f932dbcd9e1c
---> 3fd8b6d3e2fe
Step 5/8 : ENV PYTHONDONTWRITEBYTECODE=TRUE
---> Running in b858233ef2aa
Removing intermediate container b858233ef2aa
---> 02ccb05b4946
Step 6/8 : ENV PATH="/opt/program:${PATH}"
---> Running in 4f52675b4300
Removing intermediate container 4f52675b4300
---> 17c13c78bc8f
Step 7/8 : COPY container/custom_loss /opt/program
---> f35978e8b68d
Step 8/8 : WORKDIR /opt/program
Removing intermediate container c4f84aa60111
---> b6d9e5b78e40
Successfully built b6d9e5b78e40
Successfully tagged custom-loss:latest
The push refers to repository [121969496650.dkr.ecr.us-west-2.amazonaws.com/custom-loss]
[1Bde93ec25: Preparing
[1B6d43bb26: Preparing
[1B86076270: Preparing
[1B571a556a: Preparing
[1Be8bfb9f2: Preparing
[1Bd84e960d: Preparing
[1Bbc8a3053: Preparing
[1B23957e9d: Preparing
[1Be3bd3cf3: Preparing
[1Bcff8801c: Preparing
[1B91e51d73: Preparing
[1Bd9e65295: Preparing
[1B45e78935: Preparing
[1B1dc646ba: Preparing
latest: digest: sha256:ac2a8e8ccc289631da8808ebfa86110dd49bb8e7eb74c2cbe0fb2882e91311dd size: 3462
###Markdown
Executing the Amazon SageMaker Training Job

As mentioned previously, we perform the actual training of the binary classifier by packaging the model definition and training code in a Docker container and using the Amazon SageMaker bring-your-own-model training functionality to estimate the model's parameters.

The following code blocks train three versions of the classifier:
1. One with Keras' built-in binary cross-entropy loss function.
2. One with a custom loss function that weighs false negatives 5 times more heavily than false positives.
3. One with a custom loss function that weighs false negatives 200 times more heavily than false positives.

Builtin Loss Function Model

We specify the parameters of the Amazon SageMaker training job: the Amazon SageMaker role, account, and region, and the container image to use to train the classifier.
###Code
sess = sage.Session()
account = sess.boto_session.client('sts').get_caller_identity()['Account']
region = sess.boto_session.region_name
image = '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account, region, image_name)
###Output
_____no_output_____
###Markdown
We create and execute an Amazon SageMaker training job for the built-in loss function, that is, Keras's binary cross-entropy loss function. By passing the "loss_function_type" hyperparameter set to "builtin", Amazon SageMaker knows to use Keras's binary cross-entropy loss function.
###Code
#Specify the type of loss function with which to train the model. Specify false negative and false positive costs
#if using a custom loss function
hyperparameters_builtin = {
"loss_function_type": "builtin"
}
model_builtin = sage.estimator.Estimator(image_name=image,
role=role,
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
output_path="s3://{}/{}/output".format(bucket, prefix),
hyperparameters=hyperparameters_builtin,
sagemaker_session=sess)
model_builtin.fit({'train':"s3://{}/{}/train".format(bucket, prefix)})
###Output
INFO:sagemaker:Creating training-job with name: custom-loss-2018-08-06-17-04-51-897
###Markdown
5:1 Custom Loss Function Model

We create and execute an Amazon SageMaker training job for the custom 5:1 loss function, that is, custom loss with false negatives being 5 times more costly than false positives. By passing the "loss_function_type" set to "custom" and "fn_cost" to "5" and "fp_cost" to "1", respectively, Amazon SageMaker knows to use the custom loss function with the specified misclassification costs.
###Code
#Specify the type of loss function with which to train the model. Specify false negative and false positive costs
#if using a custom loss function
hyperparameters_five = {
"loss_function_type": "custom",
"fn_cost": 5,
"fp_cost": 1
}
model_five = sage.estimator.Estimator(image_name=image,
role=role,
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
output_path="s3://{}/{}/output".format(bucket, prefix),
hyperparameters=hyperparameters_five,
sagemaker_session=sess)
model_five.fit({'train':"s3://{}/{}/train".format(bucket, prefix)})
###Output
INFO:sagemaker:Creating training-job with name: custom-loss-2018-08-06-17-07-33-390
###Markdown
Medical Custom Loss Function Model

We create and execute an Amazon SageMaker training job for the custom 200:1 loss function, that is, custom loss with false negatives being 200 times more costly than false positives. By passing the "loss_function_type" set to "custom" and "fn_cost" to "200" and "fp_cost" to "1", respectively, Amazon SageMaker knows to use the custom loss function with the specified misclassification costs.
###Code
#Specify the type of loss function with which to train the model. Specify false negative and false positive costs
#if using a custom loss function
hyperparameters_medical = {
"loss_function_type": "custom",
"fn_cost": 200,
"fp_cost": 1
}
model_medical = sage.estimator.Estimator(image_name=image,
role=role,
train_instance_count=1,
train_instance_type='ml.c4.2xlarge',
output_path="s3://{}/{}/output".format(bucket, prefix),
hyperparameters=hyperparameters_medical,
sagemaker_session=sess)
model_medical.fit({'train':"s3://{}/{}/train".format(bucket, prefix)})
###Output
INFO:sagemaker:Creating training-job with name: custom-loss-2018-08-06-17-10-45-209
###Markdown
Making Predictions

After training the model, Amazon SageMaker uploads the trained model artifact to the S3 bucket we specified in the output_path parameter in the training jobs. We now load the model artifacts from S3, make predictions with all three model variants, and then compare results.

Loading Builtin Loss Function Model
###Code
#We load the trained model locally
builtin_tar_name = '{}/output/{}/output/model.tar.gz'.format(prefix, model_builtin.latest_training_job.job_name)
aws.resource('s3').Bucket(bucket).download_file(builtin_tar_name, 'model_builtin.tar.gz')
#Amazon SageMaker creates a tarred model artifact so to reconstruct the model, we first need to untar the model.
!mkdir builtin_model
trained_model_builtin = !tar xzvf 'model_builtin.tar.gz' -C 'builtin_model'
#We then rebuild Keras model from the Amazon SageMaker model artifact.
arch_builtin = [x for x in trained_model_builtin if 'json' in x][0]
weights_builtin = [x for x in trained_model_builtin if 'h5' in x][0]
with open('builtin_model/' + arch_builtin, 'rb') as f:
model_load_builtin = keras.models.model_from_json(f.read())
model_load_builtin.load_weights('builtin_model/' + weights_builtin)
###Output
_____no_output_____
###Markdown
Loading 5:1 Custom Loss Function Model
###Code
#We load the trained model locally
five_tar_name = '{}/output/{}/output/model.tar.gz'.format(prefix, model_five.latest_training_job.job_name)
aws.resource('s3').Bucket(bucket).download_file(five_tar_name, 'model_five.tar.gz')
#Amazon SageMaker creates a tarred model artifact so to reconstruct the model, we first need to untar the model.
!mkdir five_model
trained_model_five = !tar xzvf 'model_five.tar.gz' -C 'five_model'
#We then rebuild Keras model from the Amazon SageMaker model artifact.
arch_five = [x for x in trained_model_five if 'json' in x][0]
weights_five = [x for x in trained_model_five if 'h5' in x][0]
with open('five_model/' + arch_five, 'r') as f:
model_load_five = keras.models.model_from_json(f.read())
model_load_five.load_weights('five_model/' + weights_five)
###Output
_____no_output_____
###Markdown
Loading Medical Custom Loss Function Model
###Code
#We load the trained model locally
medical_tar_name = '{}/output/{}/output/model.tar.gz'.format(prefix, model_medical.latest_training_job.job_name)
aws.resource('s3').Bucket(bucket).download_file(medical_tar_name, 'model_medical.tar.gz')
#Amazon SageMaker creates a tarred model artifact so to reconstruct the model, we first need to untar the model.
!mkdir medical_model
trained_model_medical = !tar xzvf 'model_medical.tar.gz' -C 'medical_model'
#We then rebuild Keras model from the Amazon SageMaker model artifact.
arch_medical = [x for x in trained_model_medical if 'json' in x][0]
weights_medical = [x for x in trained_model_medical if 'h5' in x][0]
with open('medical_model/' + arch_medical, 'r') as f:
model_load_medical = keras.models.model_from_json(f.read())
model_load_medical.load_weights('medical_model/' + weights_medical)
###Output
_____no_output_____
###Markdown
Making Predictions
###Code
#continuous and discrete predictions for builtin loss function model
y_builtin_pred = model_load_builtin.predict(x_test_scld)
y_builtin_pred_discrete = y_builtin_pred > cutoff
#continuous and discrete predictions for 5:1 custom loss function model
y_five_pred = model_load_five.predict(x_test_scld)
y_five_pred_discrete = y_five_pred > cutoff
#continuous and discrete predictions for 200:1 custom loss function model
y_medical_pred = model_load_medical.predict(x_test_scld)
y_medical_pred_discrete = y_medical_pred > cutoff
#here we divide the model predictions based upon whether ground-truth is positive or negative
y_builtin_pred_pos = y_builtin_pred[y_test==1]
y_builtin_pred_neg = y_builtin_pred[y_test==0]
y_five_pred_pos = y_five_pred[y_test==1]
y_five_pred_neg = y_five_pred[y_test==0]
y_medical_pred_pos = y_medical_pred[y_test==1]
y_medical_pred_neg = y_medical_pred[y_test==0]
###Output
_____no_output_____
###Markdown
Analyzing the Results

What characteristics are we generally looking for in a well-performing model?
1. There should be a small number of false negatives, that is, a small number of malignant tumors classified as benign.
2. Predictions should cluster closely around ground-truth values, that is, predictions should cluster closely around 0 and 1.

Keep in mind as you rerun this notebook that the data set used is small (569 instances), and therefore the test set is even smaller (143 instances). Because of this, the exact distribution of predictions and prediction errors of the model may vary from run to run due to sampling error. Despite this, the following general results hold across model runs.

Accuracy and the ROC Curve

First, we'll show traditional measures of the model. Here, we generate Accuracy and AUC values for all three models.
###Code
acc_builtin = accuracy_score(y_test, y_builtin_pred_discrete)
acc_five = accuracy_score(y_test, y_five_pred_discrete)
acc_medical = accuracy_score(y_test, y_medical_pred_discrete)
auc_builtin = roc_auc_score(y_test, y_builtin_pred)
auc_five = roc_auc_score(y_test, y_five_pred)
auc_medical = roc_auc_score(y_test, y_medical_pred)
pd.DataFrame({'Accuracy': [acc_builtin, acc_five, acc_medical], 'AUC': [auc_builtin, auc_five, auc_medical]} , index=['Built-in', '5:1', 'Medical'])
###Output
_____no_output_____
###Markdown
Now, we'll generate ROC curves for all three models.
###Code
fpr_builtin, tpr_builtin, thresholds_builtin = roc_curve(y_test, y_builtin_pred)
fpr_five, tpr_five, thresholds_five = roc_curve(y_test, y_five_pred)
fpr_medical, tpr_medical, thresholds_medical = roc_curve(y_test, y_medical_pred)
fig, axes = plt.subplots(1,1, figsize=(9, 6))
axes.set(xlabel='False Positive Rate', ylabel='True Positive Rate', title='ROC Curve')
axes.plot(fpr_builtin, tpr_builtin, label='Built-in')
axes.plot(fpr_five, tpr_five, label='5:1')
axes.plot(fpr_medical, tpr_medical, label='Medical')
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.legend()
###Output
_____no_output_____
###Markdown
By these traditional measures, the two custom loss function models do not perform as well as the built-in loss function (by a small margin). However, accuracy is less relevant in judging the quality of these models. In fact, accuracy may be lowest in the "best" model because we are willing to have more false positives as long as we decrease the number of false negatives sufficiently.

Looking at the ROC curve and AUC score for these three models, all models appear very similar according to these measures. However, neither of these metrics shows us how the predicted scores are distributed over the [0, 1] interval, so we are not able to determine where those predictions are clustered.

Classification Report

Keep in mind that the cost of a false negative increases as we move through these three models. That implies that the number of false negatives is likely to decrease in each successive model. What does this imply for the values in these classification reports? It implies that the negative class (benign) should have higher precision and that the positive class (malignant) should have higher recall. (Remember that precision = tp / (tp + fp) and recall = tp / (tp + fn).)

Remember that for our classification problem we are classifying tumors as benign or malignant. According to the costs reported in the medical literature cited previously, a false negative is much more costly than a false positive. Because of that, we want to classify all malignant tumors as such and are not bothered if that results in more false positive predictions (to a point). Therefore, for the negative class (benign), we care more about having high precision, and for the positive class (malignant), we care more about having high recall.
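For example, the per-class figures discussed above can also be pulled out programmatically with scikit-learn's precision_recall_fscore_support (imported earlier); a minimal sketch:

```
# Per-class precision/recall for the medical model (label order: benign=0, malignant=1)
precision, recall, fscore, support = precision_recall_fscore_support(y_test, y_medical_pred_discrete)
print('Benign precision: {:.3f}'.format(precision[0]))
print('Malignant recall: {:.3f}'.format(recall[1]))
```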
###Code
# Define precision / recall matrix for built in
display(HTML('<b>Built-in Loss Function</b>'))
print(classification_report(y_test, y_builtin_pred_discrete))
# Define precision / recall matrix for 5:1
display(HTML('<b>5:1 Custom Loss Function</b>'))
print(classification_report(y_test, y_five_pred_discrete))
# Define precision / recall matrix for medical
display(HTML('<b>Medical Custom Loss Function</b>'))
print(classification_report(y_test, y_medical_pred_discrete))
###Output
_____no_output_____
###Markdown
These classification reports show that we've achieved our goal: the medical model has the highest precision for benign, and the highest recall for malignant. What this implies is that when using the medical model, we are least likely to falsely classify a malignant tumor as benign, and we are most likely to identify all malignant tumors as malignant. Looking at the detail of these reports allows us to see that the medical model is the "best" of these three models, despite having the lowest F1-score and the lowest average precision and recall.

Confusion Matrix

To better understand the errors, a better tool is the confusion matrix. Since our goal is to reduce the number of false negatives, the model with the fewest false negatives is "best", provided the increase in false positives is not excessive. As we move through these three confusion matrices, the cost of a false negative relative to a false positive increases. As such, we expect the number of false negatives to decrease and the number of false positives to increase. However, the number of true positives and true negatives may also shift, as we're training the model to weigh errors differently than before.
###Code
# Print built-in loss confusion matrix
conf_matrix(y_builtin_pred_pos, y_builtin_pred_neg, cutoff, 'Built-in Loss Function' )
# Print 5:1 custom loss confusion matrix
conf_matrix(y_five_pred_pos, y_five_pred_neg, cutoff, '5:1 Custom Loss Function' )
# Print medical custom loss confusion matrix
conf_matrix(y_medical_pred_pos, y_medical_pred_neg, cutoff, 'Medical Custom Loss Function' )
###Output
_____no_output_____
###Markdown
We can see from the results that modifying the loss function values provided when training the model allows us to shift the balance between the categories of error. Using different weightings for the relative cost has a significant impact on the errors, and moves some of the other predictions as well. An interesting direction for future research is to explore the changing features that are identified by the model in support of these prediction differences.This gives us a powerful lever to influence the model based on the moral, ethical, or economic impacts of the decisions we make about the relative weights of the different errors. Custom Confusion Matrix A specific observation is classified as positive or negative by comparing its score to a threshold. Intuitively, the further away the score is from the threshold chosen, the higher is the assumed probability that the prediction is correct (assuming that the threshold value is well-chosen).When comparing the model's prediction and the threshold used for dividing classes, it's possible that the values are very close to the threshold. In the extreme, the difference in values between a "true" or a "false" could be less than the error between two different readings of an input sensor or measurement; or even, less than the rounding error of a floating point library. In the worst case, the majority of the scores for our observations could be clustered quite close to the threshold. These “close confusions” are not visible in the confusion matrix, or in the previous F1-scores or ROC curves.Intuitively, it's desirable to have the majority of the scores further away from the threshold, or, conversely, to identify the threshold based on gaps in the distribution of scores. (In cartography, for example, the [Jenks' natural breaks method](https://en.wikipedia.org/wiki/Jenks_natural_breaks_optimization) is frequently used to address the same problem.) The following graphs give us a tool to explore the relationship of the scores to the threshold.Each of the following sets of distribution plots shows the actual scores for each sample in the confusion matrix. In each set, the top histogram plots the distribution of predicted scores for all actual negatives, that is, predicted scores for benign tumors (the top row of the confusion matrix). The bottom histogram plots predicted scores for actual positives (the bottom row).The correctly classified observations on each plot are colored blue, and the incorrectly classified observations are colored orange. The threshold value of 0.5, used in other functions in this notebook, is used for coloring the plots. However, this threshold choice does NOT affect the actual scores or shape or level of the plots, only the coloring. Another threshold could be chosen, and the results in this section would still hold.In the charts below, a "good" distribution is one in which the predictions are largely grouped around the 0 and 1 points. More specifically, a "good" set of histograms would have the actual positives largely clustered around 1 with few false negatives, that is, few orange points. We would like to see the actual negatives clustered around 0, but for this use case we are willing to accept a prediction spread over the support with false positives as long as this gets us a small number of false negatives with predictions clustered around 1 for the actual positives.
###Code
custom_pred_distro(y_builtin_pred_pos, y_builtin_pred_neg, cutoff, title='Built-in Loss Function')
custom_pred_distro(y_five_pred_pos, y_five_pred_neg, cutoff, title='5:1 Loss Function')
custom_pred_distro(y_medical_pred_pos, y_medical_pred_neg, cutoff, title='Medical Loss Function')
###Output
_____no_output_____
###Markdown
We can see from these plots that as we increase the ratio, the distributions shift. As the ratio between the error types increases, the model pushes a larger number of samples to the extremes, essentially becoming much more discriminatory.We can also see that few of our actual positives have scores close to the cutoff. The model is demonstrating increased "certainty" in its classifications of the positives. Note that in many of our runs, 1 observation - 38 - is the lone incorrectly classified actual positive. This observation may have some different characteristics from the others, that may be indicative of a rare and different pattern that bears additional investigation. Expected Value We now calculate the expected value (economic value) of each of the three classification models. The expected value captures the probability-weighted loss expressed in US dollars that an individual patient is expected to suffer if given a specific diagnostic test. The diagnostic test with the highest expected value is considered the “best” under this metric. The expected value is stated in US dollars. For an explanation of QALY and the dollar values associated with testing outcomes defined in the following cell, see the discussion of screening costs earlier in this blog post.
###Code
dollar_per_qaly = 100000                 # dollar value assigned to one quality-adjusted life year (QALY)
v_tn = 0 * dollar_per_qaly               # value (loss) of a true negative
v_fp = -0.0128767 * dollar_per_qaly      # value (loss) of a false positive
v_fn = -2.52 * dollar_per_qaly           # value (loss) of a false negative -- by far the largest
v_tp = -0.3528 * dollar_per_qaly         # value (loss) of a true positive
###Output
_____no_output_____
###Markdown
Note that in this section we are now reflecting the value of all four possible test outcomes - true and false negatives, as well as true and false positives.
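The exact bookkeeping lives in the `expected_value` helper defined earlier in the notebook. As a rough sketch of the idea only, the expected value is the sum of the four outcome values weighted by the empirical probability of each outcome; the function below is an illustrative stand-in, not the notebook's implementation.
###Code
import numpy as np
def expected_value_sketch(pred_pos, pred_neg, cutoff=0.5):
    # pred_pos / pred_neg: predicted scores for actual positives / actual negatives
    pred_pos = np.asarray(pred_pos).ravel()
    pred_neg = np.asarray(pred_neg).ravel()
    n = len(pred_pos) + len(pred_neg)
    tp = (pred_pos >= cutoff).sum()
    fn = (pred_pos < cutoff).sum()
    fp = (pred_neg >= cutoff).sum()
    tn = (pred_neg < cutoff).sum()
    # probability-weighted sum of the four outcome values defined above
    return (tn * v_tn + fp * v_fp + fn * v_fn + tp * v_tp) / n
# e.g. expected_value_sketch(y_medical_pred_pos, y_medical_pred_neg, cutoff)
###Output
_____no_output_____
###Markdown
Because a false negative carries by far the largest loss, a model that trades a few extra false positives for fewer false negatives should come out ahead on this metric.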
###Code
ev_builtin = expected_value(y_builtin_pred_pos, y_builtin_pred_neg, v_tn=v_tn, v_fp=v_fp, v_fn=v_fn, v_tp=v_tp)
ev_five = expected_value(y_five_pred_pos, y_five_pred_neg, v_tn=v_tn, v_fp=v_fp, v_fn=v_fn, v_tp=v_tp)
ev_medical = expected_value(y_medical_pred_pos, y_medical_pred_neg, v_tn=v_tn, v_fp=v_fp, v_fn=v_fn, v_tp=v_tp)
df_ev = pd.DataFrame(data = [ev_builtin['ev'], ev_five['ev'], ev_medical['ev']],
                     index = ['Built-in', '5:1', 'Medical'],
columns=['Expected Value'])
def currency_format(x):
if x >= 0:
return '${:,.2f}'.format(x)
else:
return '-${:,.2f}'.format(abs(x))
df_ev['Expected Value'] = df_ev['Expected Value'].map(currency_format)
df_ev
###Output
_____no_output_____ |
incubating/examples/r_mnist/r_mnist.ipynb | ###Markdown
R MNIST Model * Wrap an R model (using the ```caret``` library) for use as a prediction microservice in seldon-core * Run locally on Docker to test * Deploy on seldon-core running on minikube Dependencies * [Helm](https://github.com/kubernetes/helm) * [Minikube](https://github.com/kubernetes/minikube) * [S2I](https://github.com/openshift/source-to-image) * R ```bash pip install seldon-core``` Train locally
###Code
!rm -f *ubyte && ./get_data.sh && Rscript train.R
###Output
--2019-04-24 16:43:55-- http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 9912422 (9.5M) [application/x-gzip]
Saving to: ‘train-images-idx3-ubyte.gz’
train-images-idx3-u 100%[===================>] 9.45M 2.56MB/s in 4.5s
2019-04-24 16:43:59 (2.10 MB/s) - ‘train-images-idx3-ubyte.gz’ saved [9912422/9912422]
--2019-04-24 16:43:59-- http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 28881 (28K) [application/x-gzip]
Saving to: ‘train-labels-idx1-ubyte.gz’
train-labels-idx1-u 100%[===================>] 28.20K --.-KB/s in 0.08s
2019-04-24 16:44:00 (346 KB/s) - ‘train-labels-idx1-ubyte.gz’ saved [28881/28881]
--2019-04-24 16:44:00-- http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1648877 (1.6M) [application/x-gzip]
Saving to: ‘t10k-images-idx3-ubyte.gz’
t10k-images-idx3-ub 100%[===================>] 1.57M 1.08MB/s in 1.5s
2019-04-24 16:44:01 (1.08 MB/s) - ‘t10k-images-idx3-ubyte.gz’ saved [1648877/1648877]
--2019-04-24 16:44:01-- http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Resolving yann.lecun.com (yann.lecun.com)... 216.165.22.6
Connecting to yann.lecun.com (yann.lecun.com)|216.165.22.6|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4542 (4.4K) [application/x-gzip]
Saving to: ‘t10k-labels-idx1-ubyte.gz’
t10k-labels-idx1-ub 100%[===================>] 4.44K --.-KB/s in 0s
2019-04-24 16:44:01 (365 MB/s) - ‘t10k-labels-idx1-ubyte.gz’ saved [4542/4542]
Loading required package: lattice
Loading required package: ggplot2
Loading required package: foreach
Loading required package: iterators
Loading required package: parallel
###Markdown
Wrap model using s2i
###Code
!s2i build . seldonio/seldon-core-s2i-r:0.3 r-mnist:0.1
!docker run --name "mnist_predictor" -d --rm -p 5000:5000 r-mnist:0.1
###Output
5104ca6b9b9b9f8a778c46ae1c9d84a733ea63732dce128066306f7fcc49bb82
###Markdown
Send some random features that conform to the contract
###Code
!seldon-core-tester contract.json 0.0.0.0 5000 -p
!docker rm mnist_predictor --force
###Output
mnist_predictor
###Markdown
Test using Minikube
###Code
!minikube start --memory 4096
###Output
_____no_output_____
###Markdown
Setup Seldon Core Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).
###Code
!eval $(minikube docker-env) && s2i build . seldonio/seldon-core-s2i-r:0.1 r-mnist:0.1
!kubectl create -f r_mnist_deployment.json
###Output
seldondeployment.machinelearning.seldon.io/seldon-deployment-example created
###Markdown
Wait until ready (replicas == replicasAvailable)
###Code
!kubectl rollout status deploy/r-mnist-deployment-r-mnist-predictor-d12e455
!seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \
seldon-deployment-example --namespace seldon -p
!minikube delete
###Output
_____no_output_____
PID.ipynb | ###Markdown
PID Controller Simulation
###Code
# imports we need to graph stuff
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import animation, rc
import numpy as np
import random
from IPython.display import HTML
###Output
_____no_output_____
###Markdown
Below we define a function that will compute the response $u$ to the current error $e$ using the PID formula. A PID controller uses the following equation to compute $u$:$$u = K_P e + K_I \int e \, dt + K_D \frac{de}{dt}$$In this function, `errors` is an array of all the errors $e$ we've recorded so far, `Kp`, `Ki`, `Kd` are the PID constants, and `time_delta` is the amount of time between error measurements. In Python, we can compute the three terms of the PID equation like this:- The proportional term is simply `Kp * current_error`, where `current_error` is the last error we've recorded: `errors[-1]`- Because we have a list of discrete error measurements, we can replace the integral with summation:$$K_I \int e \, dt \approx K_I \sum e \Delta t$$In Python: `Ki * sum(errors) * time_delta`- We can approximate the derivative of the error using two consecutive error measurements ($e_1$ and $e_0$):$$K_D \frac{de}{dt} \approx K_D \frac{e_1 - e_0}{\Delta t}$$In Python: `Kd*(current_error - prev_error) / time_delta`
###Code
# This function takes the list of recorded errors (most recent last),
# the 3 PID constants and the time step, and returns the PID response
def pid_response(errors, Kp, Ki, Kd, time_delta):
current_error = errors[-1]
if len(errors) > 1:
prev_error = errors[-2]
else:
# if we don't have two error measurements, just reuse
# the first error
prev_error = current_error
proportional = Kp*current_error
integral = Ki* sum(errors) * time_delta
derivative = Kd*(current_error - prev_error) / time_delta
return proportional + integral + derivative
# please ignore
def bang_response(errors, Kp, Ki, Kd, time_delta):
return np.sign(errors[-1]) * 0.5
###Output
_____no_output_____
###Markdown
Next, we need to define the _process_ (also known as the plant). The process is essentially the system that acts upon the response given by the PID controller. If we were using a PID controller in the context of a car trying to follow a straight line, the process would receive a steering response from the PID controller (perhaps in the form of an angle) and then act upon this response by physically changing the car's heading. We don't have a physical process to work with here, but we can simulate one instead. The function `process` accepts a PID controller response `res` and the current state of the system `state`. How this process is implemented isn't really relevant, but feel free to read the code:
###Code
p_response = 0
constant_shift = 0
def process(res, state):
global p_response
p_response = p_response*0.96 - 0.06*res + constant_shift
# add "real-world" contraints
p_response = min(0.3, p_response)
p_response = max(-0.3, p_response)
return state + p_response
###Output
_____no_output_____
###Markdown
Here we define the error function. The error function tells us how far our current state `s` is from our desired state `d`:
###Code
def error(s, d):
return s - d
def simulate_pid(Kp, Ki, Kd):
time_delta = 0.1
duration = 30
# create a list of times from 0 to `duration` seconds with steps of `time_delta`
# in total, duration / time_delta observations will be recorded
times = np.arange(0, duration, time_delta)
desired = np.zeros(times.size)
desired[100:170] = 1
states = np.zeros(times.size)
states[0] = 3 # initial state
errors = np.zeros(times.size)
errors[0] = error(states[0], desired[0]) # initial error
for i in range(1, len(times)):
res = pid_response(errors[:i], Kp, Ki, Kd, time_delta=time_delta)
state = process(res, states[i - 1])
states[i] = state
errors[i] = error(state, desired[i])
return times, states, desired, errors
fig, ax = plt.subplots()
ax.set_title("PID controller response")
ax.set_xlabel("Time")
ax.set_ylabel("State")
line, = ax.plot([], [])
ax.set_xlim(( 0, 30))
ax.set_ylim((-4, 4))
fmt_str = 'Kp=%.2f\nKi=%.2f\nKd=%.2f'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
tex = ax.text(0.78, 0.95, '', transform=ax.transAxes, fontsize=14, verticalalignment='top')
desired = np.zeros(30 * 10)
desired[100:170] = 1
ax.plot(np.arange(0, 30, 0.1), desired)
def animate(i):
Kp = 1.5 - min(i,100)*0.01
Kd = 0
if i > 100:
Kd = min((i-100)/2.0, 30) * 0.01
t, s, d, e = simulate_pid(Kp, 0, Kd)
line.set_data(t, s)
s = fmt_str % (Kp,0,Kd)
tex.set_text(s)
return (line,)
anim = animation.FuncAnimation(fig, animate,
frames=250, interval=20, blit=True)
# display HTML
HTML(anim.to_html5_video())
# save as GIF
anim.save('pid_animation.gif', writer='imagemagick')
###Output
_____no_output_____
###Markdown
PID Controller Simulation
###Code
# imports we need to graph stuff
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import animation, rc
import numpy as np
import random
from IPython.display import HTML
###Output
_____no_output_____
###Markdown
Below we define a function that will compute the response $u$ to the current error $e$ using the PID formula. A PID controller uses the following equation to compute $u$:$$u = K_P e + K_I \int e \, dt + K_D \frac{de}{dt}$$In this function, `errors` is an array of all the errors $e$ we've recorded so far, `Kp`, `Ki`, `Kd` are the PID constants, and `time_delta` is the amount of time between error measurements. In Python, we can compute the three terms of the PID equation like this:- The proportional term is simply `Kp * current_error`, where `current_error` is the last error we've recorded: `errors[-1]`- Because we have a list of discrete error measurements, we can replace the integral with summation:$$K_I \int e \, dt \approx K_I \sum e \Delta t$$In Python: `Ki * sum(errors) * time_delta`- We can approximate the derivative of the error using two consecutive error measurements ($e_1$ and $e_0$):$$K_D \frac{de}{dt} \approx K_D \frac{e_1 - e_0}{\Delta t}$$In Python: `Kd*(current_error - prev_error) / time_delta`
###Code
# This function takes the list of recorded errors (most recent last),
# the 3 PID constants and the time step, and returns the PID response
def pid_response(errors, Kp, Ki, Kd, time_delta):
current_error = errors[-1]
if len(errors) > 1:
prev_error = errors[-2]
else:
# if we don't have two error measurements, just reuse
# the first error
prev_error = current_error
proportional = Kp*current_error
integral = Ki* sum(errors) * time_delta
derivative = Kd*(current_error - prev_error) / time_delta
return proportional + integral + derivative
# please ignore
def bang_response(errors, Kp, Ki, Kd, time_delta):
return np.sign(errors[-1]) * 0.5
###Output
_____no_output_____
###Markdown
Next, we need to define the _process_ (also known as the plant). The process is essentially the system that acts upon the response given by the PID controller. If we were using a PID controller in the context of a car trying to follow a straight line, the process would receive a steering response from the PID controller (perhaps in the form of an angle) and then act upon this response by physically changing the car's heading. We don't have a physical process to work with here, but we can simulate one instead. The function `process` accepts a PID controller response `res` and the current state of the system `state`. How this process is implemented isn't really relevant, but feel free to read the code:
###Code
p_response = 0
constant_shift = 0.05
def process(res, state):
global p_response
p_response = p_response*0.96 - 0.06*res + constant_shift
# add "real-world" contraints
p_response = min(0.3, p_response)
p_response = max(-0.3, p_response)
return state + p_response
###Output
_____no_output_____
###Markdown
Here we define the error function. The error function tells us how far our current state `s` is from our desired state `d`:
###Code
def error(s, d):
return s - d
def simulate_pid(Kp, Ki, Kd):
time_delta = 0.1
duration = 30
# create a list of times from 0 to `duration` seconds with steps of `time_delta`
# in total, duration / time_delta observations will be recorded
times = np.arange(0, duration, time_delta)
desired = np.zeros(times.size)
desired[100:170] = 1
states = np.zeros(times.size)
states[0] = 3 # initial state
errors = np.zeros(times.size)
errors[0] = error(states[0], desired[0]) # initial error
for i in range(1, len(times)):
res = pid_response(errors[:i], Kp, Ki, Kd, time_delta=time_delta)
state = process(res, states[i - 1])
states[i] = state
errors[i] = error(state, desired[i])
return times, states, desired, errors
fig, ax = plt.subplots()
ax.set_title("PID controller response")
ax.set_xlabel("Time")
ax.set_ylabel("State")
line, = ax.plot([], [])
ax.set_xlim(( 0, 30))
ax.set_ylim((-4, 4))
fmt_str = 'Kp=%.2f\nKi=%.2f\nKd=%.2f'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
tex = ax.text(0.78, 0.95, '', transform=ax.transAxes, fontsize=14, verticalalignment='top')
desired = np.zeros(30 * 10)
desired[100:170] = 1
ax.plot(np.arange(0, 30, 0.1), desired)
def animate(i):
Kp = 1.5 - min(i,100)*0.01
Kd = 0
if i > 100:
Kd = min((i-100)/2.0, 30) * 0.01
t, s, d, e = simulate_pid(Kp,0.1, Kd)
line.set_data(t, s)
s = fmt_str % (Kp,0.1,Kd)
tex.set_text(s)
return (line,)
anim = animation.FuncAnimation(fig, animate,
frames=250, interval=20, blit=True)
# display HTML
HTML(anim.to_html5_video())
# save as GIF
anim.save('pid_animation_Ki_-1.gif', writer='imagemagick')
###Output
MovieWriter imagemagick unavailable; using Pillow instead.
###Markdown
PID Controller Simulation
###Code
# imports we need to graph stuff
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import animation, rc
import numpy as np
import random
from IPython.display import HTML
###Output
_____no_output_____
###Markdown
Below we define a function that will compute the response $u$ to the current error $e$ using the PID formula. A PID controller uses the following equation to compute $u$:$$u = K_P e + K_I \int e \, dt + K_D \frac{de}{dt}$$In this function, `errors` is an array of all the errors $e$ we've recorded so far, `Kp`, `Ki`, `Kd` are the PID constants, and `time_delta` is the amount of time between error measurements. In Python, we can compute the three terms of the PID equation like this:- The proportional term is simply `Kp * current_error`, where `current_error` is the last error we've recorded: `errors[-1]`- Because we have a list of discrete error measurements, we can replace the integral with summation:$$K_I \int e \, dt \approx K_I \sum e \Delta t$$In Python: `Ki * sum(errors) * time_delta`- We can approximate the derivative of the error using two consecutive error measurements ($e_1$ and $e_0$):$$K_D \frac{de}{dt} \approx K_D \frac{e_1 - e_0}{\Delta t}$$In Python: `Kd*(current_error - prev_error) / time_delta`
###Code
# This function takes the list of recorded errors (most recent last),
# the 3 PID constants and the time step, and returns the PID response
def pid_response(errors, Kp, Ki, Kd, time_delta):
current_error = errors[-1]
if len(errors) > 1:
prev_error = errors[-2]
else:
# if we don't have two error measurements, just reuse
# the first error
prev_error = current_error
proportional = Kp*current_error
integral = Ki* sum(errors) * time_delta
derivative = Kd*(current_error - prev_error) / time_delta
return proportional + integral + derivative
# please ignore
def bang_response(errors, Kp, Ki, Kd, time_delta):
return np.sign(errors[-1]) * 0.5
###Output
_____no_output_____
###Markdown
Next, we need to define the _process_ (also known as the plant). The process is essentially the system that acts upon the response given by the PID controller. If we were using a PID controller in the context of a car trying to follow a straight line, the process would receive a steering response from the PID controller (perhaps in the form of an angle) and then act upon this response by physically changing the car's heading. We don't have a physical process to work with here, but we can simulate one instead. The function `process` accepts a PID controller response `res` and the current state of the system `state`. How this process is implemented isn't really relevant, but feel free to read the code:
###Code
p_response = 0
constant_shift = 0
def process(res, state):
global p_response
p_response = p_response*0.96 - 0.06*res + constant_shift
# add "real-world" contraints
p_response = min(0.3, p_response)
p_response = max(-0.3, p_response)
return state + p_response
###Output
_____no_output_____
###Markdown
Here we define the error function. The error function tells us how far our current state `s` is from our desired state `d`:
###Code
def error(s, d):
return s - d
def simulate_pid(Kp, Ki, Kd):
time_delta = 0.1
duration = 30
# create a list of times from 0 to `duration` seconds with steps of `time_delta`
# in total, duration / time_delta observations will be recorded
times = np.arange(0, duration, time_delta)
desired = np.zeros(times.size)
desired[100:170] = 1
states = np.zeros(times.size)
states[0] = 3 # initial state
errors = np.zeros(times.size)
errors[0] = error(states[0], desired[0]) # initial error
for i in range(1, len(times)):
res = pid_response(errors[:i], Kp, Ki, Kd, time_delta=time_delta)
state = process(res, states[i - 1])
states[i] = state
errors[i] = error(state, desired[i])
return times, states, desired, errors
fig, ax = plt.subplots()
ax.set_title("PID controller response")
ax.set_xlabel("Time")
ax.set_ylabel("State")
line, = ax.plot([], [])
ax.set_xlim(( 0, 30))
ax.set_ylim((-4, 4))
fmt_str = 'Kp=%.2f\nKi=%.2f\nKd=%.2f'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
tex = ax.text(0.78, 0.95, '', transform=ax.transAxes, fontsize=14, verticalalignment='top')
desired = np.zeros(30 * 10)
desired[100:170] = 1
ax.plot(np.arange(0, 30, 0.1), desired)
def animate(i):
Kp = 1.5 - min(i,100)*0.01
Kd = 0
if i > 100:
Kd = min((i-100)/2.0, 30) * 0.01
t, s, d, e = simulate_pid(Kp, 0, Kd)
line.set_data(t, s)
s = fmt_str % (Kp,0,Kd)
tex.set_text(s)
return (line,)
anim = animation.FuncAnimation(fig, animate,
frames=250, interval=20, blit=True)
# display HTML
HTML(anim.to_html5_video())
# save as GIF
# anim.save('pid_animation.gif', writer='imagemagick')
###Output
_____no_output_____
###Markdown
Animation Here the blue line is the actual state and green is the desired state. Exercises - Try changing `constant_shift` (just above the `process` function). See how it affects the PID controller response graph. Then use the `Ki` constant to mitigate the constant shift.
###Code
def plt_pid(Kp, Ki, Kd):
fig, ax = plt.subplots()
ax.set_title("PID controller response")
ax.set_xlabel("Time")
ax.set_ylabel("State")
line, = ax.plot([], [])
ax.set_xlim(( 0, 30))
ax.set_ylim((-4, 4))
fmt_str = 'Kp=%.2f\nKi=%.2f\nKd=%.2f'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
tex = ax.text(0.78, 0.95, '', transform=ax.transAxes, fontsize=14, verticalalignment='top')
desired = np.zeros(30 * 10)
desired[100:170] = 1
ax.plot(np.arange(0, 30, 0.1), desired)
t, s, d, e = simulate_pid(Kp, Ki, Kd)
line.set_data(t, s)
s = fmt_str % (Kp,Ki,Kd)
tex.set_text(s)
###Output
_____no_output_____
###Markdown
Answer for 1st question Increasing the Ki value can cause the present value to overshoot the setpoint value.
###Code
plt_pid(0.5, 0, 0.3)
plt_pid(0.5, 0.5, 0.3)
###Output
_____no_output_____
###Markdown
Answer for 2nd question If we adjust the constant_shift value, the PID controller still manages to drive the state toward the desired state, but with a constant offset. This shows that in the real world a P-only controller can leave the state short of the desired state once the error becomes small, so we have to adjust the Ki value to remove the offset, although doing so also causes some overshoot.
###Code
constant_shift = -0.03
plt_pid(0.5, 0, 0.3)
plt_pid(0.5, 0.5, 0.3)
###Output
_____no_output_____
###Markdown
Answer for 3rd question This can be done by modifying some values in the simulate_pid function: * time_delta from 0.1 to 0.05 * duration from 30 to 60 * the range of the desired array assigned the value 1 from [100:170] to [200:340]
###Code
constant_shift = 0
def simulate_pid(Kp, Ki, Kd):
time_delta = 0.05
duration = 60
# create a list of times from 0 to `duration` seconds with steps of `time_delta`
# in total, duration / time_delta observations will be recorded
times = np.arange(0, duration, time_delta)
desired = np.zeros(times.size)
desired[200:340] = 1
states = np.zeros(times.size)
states[0] = 3 # initial state
errors = np.zeros(times.size)
errors[0] = error(states[0], desired[0]) # initial error
for i in range(1, len(times)):
res = pid_response(errors[:i], Kp, Ki, Kd, time_delta=time_delta)
state = process(res, states[i - 1])
states[i] = state
errors[i] = error(state, desired[i])
return times, states, desired, errors
plt_pid(0.5, 0, 0.3)
###Output
_____no_output_____
###Markdown
Controller Base Class
###Code
class Controller:
# class type indication
_ctype = "Controller"
    # Define the class initialization sequence. Constructor
def __init__(self, start_time = 0):
self._start_time = start_time
self._last_timestamp = 0
self._set_point = 0
self._u = 0.0 # this is the input to the plant
self._hist_u_p = [0] # control effort history
# Set the altitude set point
def setTarget(self, target):
self._set_point = float(target)
    # Retrieve the current control effort
def getPlantInput(self, time):
self._last_timestamp = time
self._hist_u_p.append(self._u)
return self._u
# get the class type
def getType(self):
return self._ctype
###Output
_____no_output_____
###Markdown
Open Controller Class. Inherited from the Basic Controller Class
###Code
class OpenController(Controller):
# Define the class initalization sequence. Constructor
def __init__(self, start_time = 0):
Controller.__init__(self, start_time)
self._ctype = "OpenController"
# Set the desired control effort
def setControlEffort(self, control_effort):
self._u = float(control_effort)
###Output
_____no_output_____
###Markdown
Proportional Controller Class
###Code
class PController(Controller):
def __init__(self, kp = 0.0, start_time = 0):
Controller.__init__(self, start_time)
self._kp = float(kp)
self._ctype = "PController"
# set the proportional constant
def setKP(self, kp):
self._kp = float(kp)
# calculate the error and the input to the plant (Overriding the base class behaviour)
def getPlantInput(self, measured_value, time):
self._last_timestamp = time
error = self._set_point - measured_value
self._u = self._kp * error
self._hist_u_p.append(self._u)
return self._u
###Output
_____no_output_____
###Markdown
Proportional-Integral Controller (PI - Controller)
###Code
class PIController(PController):
def __init__(self, kp = 0.0, ki = 0.0, start_time = 0):
PController.__init__(self, kp, start_time)
self._ki = float(ki)
self._error_sum= 0.0
self._ctype = "PIController"
self._hist_u_i = [0] # Control effort history
def setKI(self, ki):
self._ki = ki
def getPlantInput(self, measured_value, time):
delta_time = time - self._last_timestamp
if delta_time == 0:
return 0
self._last_timestamp = time
error = self._set_point - measured_value
# Proportional error
perr = self._kp * error
# Calculate the error_sum (accumulate the integral error)
self._error_sum += error * delta_time
# Calculate the integral error here
ierr = self._ki * self._error_sum
# Set the control effort
u = perr + ierr
# Here we are storing the control effort history for post control
self._hist_u_p.append(perr)
self._hist_u_i.append(ierr)
return u
###Output
_____no_output_____
###Markdown
Proportional-Derivative Controller (PD - Controller)
###Code
class PDController(PController):
def __init__(self, kp = 0.0, kd = 0.0, start_time = 0):
PController.__init__(self, kp, start_time)
self._kd = float(kd)
self._error_sum = 0.0
self._last_error = 0.0
self._ctype = "PDController"
self._hist_u_d = [0] # Control effort history
def setKD(self, kd):
self._kd = float(kd)
def getPlantInput(self, measured_value, time):
delta_time = time - self._last_timestamp
if delta_time == 0:
return 0
# Calculate the error
error = self._set_point - measured_value
# Set the last_timestamp_
self._last_timestamp = time
# Find error_sum_
self._error_sum += error * delta_time
# Calculate the delta_error
delta_error = error - self._last_error
# Update the past error with the current error
self._last_error = error
# Proportional error
perr = self._kp * error
        # Calculate the derivative error
derr = self._kd * (delta_error / delta_time)
# Set the control effort
u = perr + derr
# Here we are storing the control effort history for post control
# observations.
self._hist_u_p.append(perr)
self._hist_u_d.append(derr)
return u
###Output
_____no_output_____
###Markdown
Proportional-Integral-Derivative Controller (PID - Controller)
###Code
class PIDController(PController):
def __init__(self, kp = 0.0, kd = 0.0, ki = 0.0, start_time = 0):
PController.__init__(self, kp, start_time)
self._kd = float(kd)
self._ki = float(ki)
self._error_sum = 0.0
self._last_error = 0.0
self._ctype = "PIDController"
self._hist_u_d = [0] # Control effort history
self._hist_u_i = [0] # Control effort history
def setKI(self, ki):
self._ki = float(ki)
def setKD(self, kd):
self._kd = float(kd)
def getPlantInput(self, measured_value, time):
delta_time = time - self._last_timestamp
if delta_time == 0:
return 0
# Calculate the error
error = self._set_point - measured_value
# Set the last_timestamp_
self._last_timestamp = time
# Sum the errors
self._error_sum += error * delta_time
# Calculate the delta_error
delta_error = error - self._last_error
# Update the past error with the current error
self._last_error = error
# Proportional error
perr = self._kp * error
# Integral error
ierr = self._ki * self._error_sum
# Derivative error
derr = self._kd * (delta_error / delta_time)
# Set the control effort
u = perr + ierr + derr
# Here we are storing the control effort history for post control
# observations.
self._hist_u_p.append(perr)
self._hist_u_i.append(ierr)
self._hist_u_d.append(derr)
return u
###Output
_____no_output_____
###Markdown
PID - Controller with anti-windup and derivative smoothing effects
###Code
class PIDOptimalController(PIDController):
def __init__(self, kp = 0.0, kd = 0.0, ki = 0.0, start_time = 0, max_windup = 20,
alpha = 1.0, u_bounds = [float('-inf'), float('inf')]):
PIDController.__init__(self, kp, kd, ki, start_time)
# Set max wind up
self._max_windup = float(max_windup)
# Set alpha for derivative filter smoothing factor
self.alpha = float(alpha)
# Set the controller type
self._ctype = "PIDOptimalController"
# Setting control effort saturation limits
self.umin = u_bounds[0]
self.umax = u_bounds[1]
# Add a reset function to clear the class variables
def reset(self):
self._set_point = 0.0
self._kp = 0.0
self._ki = 0.0
self._kd = 0.0
self._error_sum = 0.0
self._last_timestamp = 0.0
self._last_error = 0
self._last_last_error = 0
self._last_windup = 0.0
# Create function to set max_windup_
def setMaxWindup(self, max_windup):
self._max_windup = int(max_windup)
def getPlantInput(self, measured_value, time):
delta_time = time - self._last_timestamp
if delta_time == 0:
return 0
# Calculate the error
error = self._set_point - measured_value
# Set the last_timestamp_
self._last_timestamp = time
# Sum the errors
self._error_sum += error * delta_time
# Calculate the delta_error
delta_error = error - self._last_error
# Update the past error with the current error
self._last_error = error
# Address the max windup
########################################
if self._error_sum > self._max_windup:
self._error_sum = self._max_windup
elif self._error_sum < -self._max_windup:
self._error_sum = -self._max_windup
########################################
# Proportional error
perr = self._kp * error
# Integral error
ierr = self._ki * self._error_sum
# Recalculate the derivative error here incorporating derivative smoothing!
derr = self._kd * (self.alpha * delta_error / delta_time + (1 - self.alpha) * self._last_error)
# Set the control effort
u = perr + ierr + derr
# Enforce actuator saturation limits
if u > self.umax:
u = self.umax
elif u < self.umin:
u = self.umin
# Here we are storing the control effort history for post control
# observations.
self._hist_u_p.append(perr)
self._hist_u_i.append(ierr)
self._hist_u_d.append(derr)
return u
###Output
_____no_output_____
###Markdown
Dynamic Equation of the System
###Code
def ydot(y, t, controller):
''' Returns the state vector at the next time-step
Parameters:
----------
y(k) = state vector, a length 2 list
= [altitude, speed]
t = time, (sec)
pid = instance of the PIDController class
Returns
-------
y(k+1) = [y[0], y[1]] = y(k) + ydot*dt
'''
# Model state
#====================================
y0 = y[0] # altitude, (m)
y1 = y[1] # speed, (m/s)
# Model parameters
#====================================
g = -9.81 # gravity, m/s/s
m = 1.54 # quadrotor mass, kg
c = 10.0 # electro-mechanical transmission constant
# time step, (sec)
#====================================
dt = t - controller._last_timestamp
# Control effort
#====================================
if controller.getType() == "OpenController":
u = controller.getPlantInput(t)
elif controller.getType() == "PController":
u = controller.getPlantInput(y0, t)
elif controller.getType() == "PIController":
u = controller.getPlantInput(y0, t)
elif controller.getType() == "PDController":
u = controller.getPlantInput(y0, t)
elif controller.getType() == "PIDController":
u = controller.getPlantInput(y0, t)
elif controller.getType() == "PIDOptimalController":
u = controller.getPlantInput(y0, t)
### State derivatives
#====================================
if (y0 <= 0.):
# if control input, u <= gravity, vehicle stays at rest on the ground
# this prevents quadrotor from "falling" through the ground when thrust is
# too small.
if u <= np.absolute(g*m/c):
y0dot = 0.
y1dot = 0.
else: # else if u > gravity and quadrotor accelerates upwards
y0dot = y1
y1dot = g + c/m*u - 0.75*y1
else: # otherwise quadrotor is already in the air
y0dot = y1
y1dot = g + c/m*u - 0.75*y1
y0 += y0dot*dt
y1 += y1dot*dt
# add noise to the altitude
if controller.getType() == "PIDOptimalController":
sigma = 0.1
y0 = np.random.normal(y0, sigma, 1)
return [y0, y1]
###Output
_____no_output_____
###Markdown
Simulation Parameter
###Code
cf = 1.7 # control effort in case of open loop controller
kp = 0.5 # proportional constant
ki = 0.11 # integral constant
kd = 0.2 # derivative constant
umax = 5.0 # max controller output, (N)
alpha = 1.0 # derivative filter smoothing factor
N = 600 # number of simulation points
t0 = 0 # starting time, (sec)
tf = 45 # end time, (sec)
time = np.linspace(t0, tf, N)
dt = time[1] - time[0] # delta t, (sec)
###Output
_____no_output_____
###Markdown
Initialization
###Code
# Inital conditions (i.e., initial state vector)
y = [0, 0]
#y[0] = initial altitude, (m)
#y[1] = initial speed, (m/s)
# Initialize array to store values, array of solution points
soln = np.zeros((len(time),len(y)))
###Output
_____no_output_____
###Markdown
Select Controller Type for Simulation
###Code
# Create instance of Open_Controller class
#controller = OpenController()
#controller = PController(kp = kp)
#controller = PIController(kp = kp, ki = ki)
#controller = PDController(kp = kp, kd = kd)
controller = PIDController(kp = kp, ki = ki, kd = kd)
#controller = PIDOptimalController(kp = kp, ki = ki, kd = kd, max_windup = 1e6, u_bounds = [0, umax], alpha = alpha)
# Set altitude target
r = 10 # meters
controller.setTarget(r)
###Output
_____no_output_____
###Markdown
Simulation (run)
###Code
j = 0 # dummy counter
for t in time:
# Evaluate state at next time point
y = ydot(y,t,controller)
# Store results
soln[j,:] = y
j += 1
###Output
_____no_output_____
###Markdown
Plot Results
###Code
%matplotlib notebook
# Plot 1: This is the altitude of our quad copter as a function of time!
SP = np.ones_like(time)*r # altitude set point
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(time, soln[:,0],time,SP,'--')
ax1.set_xlabel('Time, (sec)')
ax1.set_ylabel('Altitude, (m)')
# Plot 2: This is the speed of our quad copter as a function of time!
ax2 = fig.add_subplot(212)
ax2.plot(time, soln[:,1])
ax2.set_xlabel('Time, (sec)')
ax2.set_ylabel('Speed, (m/s)')
plt.tight_layout()
plt.show()
# Plot 3: This is the control effort applied to our quad copter as a function of time!
fig2 = plt.figure()
ax3 = fig2.add_subplot(111)
if controller.getType() == "OpenController":
ax3.plot(time, controller._hist_u_p, label='Open Effort', linewidth=3, color = 'red')
if controller.getType() == "PController":
ax3.plot(time, controller._hist_u_p, label='P Effort', linewidth=3, color = 'red')
if controller.getType() == "PIController":
ax3.plot(time, controller._hist_u_p, label='P Effort', linewidth=3, color = 'red')
ax3.plot(time, controller._hist_u_i, label='I Effort', linewidth=3, color = 'blue')
if controller.getType() == "PDController":
ax3.plot(time, controller._hist_u_p, label='P Effort', linewidth=3, color = 'red')
ax3.plot(time, controller._hist_u_d, label='D Effort', linewidth=3, color = 'blue')
if controller.getType() == "PIDController" or controller.getType() == "PIDOptimalController":
ax3.plot(time, controller._hist_u_p, label='P Effort', linewidth=3, color = 'red')
ax3.plot(time, controller._hist_u_d, label='D Effort', linewidth=3, color = 'blue')
ax3.plot(time, controller._hist_u_i, label='I Effort', linewidth=3, color = 'green')
ax3.set_xlabel('Time, (sec)')
ax3.set_ylabel('Control Effort')
h, l = ax3.get_legend_handles_labels()
ax3.legend(h, l)
plt.tight_layout()
plt.show()
##################
y0 = soln[:,0] #altitude
rise_time_index = np.argmax(y0>r)
RT = time[rise_time_index]
print("The rise time is {0:.3f} seconds".format(RT))
OS = (np.max(y0) - r)/r*100
if OS < 0:
OS = 0
print("The percent overshoot is {0:.1f}%".format(OS))
print ("The offset from the target at 30 seconds is {0:.3f} meters".format(abs(soln[-1,0]-r)))
###Output
_____no_output_____
###Markdown
PID The PID controller is made up of:- a Proportional action to provide speed of response and reduce the steady-state error- an Integral action to eliminate the steady-state error- a Derivative action to handle errors when there is a change in the absolute value of the error, keeping in mind that if the error is constant only the proportional and integral modes act. The transfer function of the PID is:$$K_C\left ( 1 + \frac{1}{\tau_I s}+ \tau_D s \right )$$The form above is not realizable because of the derivative action. For this reason, it is common to use the form:$$K_C\left ( 1 + \frac{1}{\tau_I s}+ \frac{\tau_D s}{\alpha \tau_D s+1} \right )$$where $\alpha$ is chosen so that the derivative part is a "good" approximation of the derivative. Direct synthesis This technique starts by defining the desired closed-loop behavior of the system, writing a closed-loop transfer function of an appropriate form, and solving for the compensator. Starting from the diagram above, the desired relation for $\frac{Y}{Y_{SP}}$ is defined taking $D=0$. Then $G_C$ is solved for.
###Code
import sympy  # assumed not already imported earlier in this notebook
s, G_C = sympy.symbols('s, G_C')
G_C
###Output
_____no_output_____
###Markdown
We start from a first-order-plus-dead-time model as the desired expression for $\frac{Y}{Y_{SP}}$.
###Code
tau_c, phi = sympy.symbols('tau_c, phi', positive=True, nonzero=True)
desired_Y_over_Y_sp = sympy.exp(phi*s)/(tau_c*s + 1)
desired_Y_over_Y_sp
###Output
_____no_output_____
###Markdown
Recall the shape of the system's response to a step input.
###Code
from ipywidgets import interact
t = sympy.Symbol('t', positive=True)
def plotresponse(theta=(0, 3.), tau_c_in=(1., 5.)):
desired_response = sympy.inverse_laplace_transform(desired_Y_over_Y_sp.subs({phi: -theta, tau_c: tau_c_in})/s, s, t)
p = sympy.plot(desired_response, (t, 0, 10), show=False)
p2 = sympy.plot(1, (t, 0, 10), show=False)
p.append(p2[0])
p.show()
interact(plotresponse);
###Output
_____no_output_____
###Markdown
From an approximate model of the process, $\widetilde{G_P}$, the closed-loop transfer function can be calculated.
###Code
GPtilde = sympy.Symbol(r'\widetilde{G_P}')
actual_Y_over_Y_sp = GPtilde*G_C/(1 + GPtilde*G_C)
actual_Y_over_Y_sp
###Output
_____no_output_____
###Markdown
At this point, the compensator can be found by solving the equality with the desired closed-loop transfer function.
###Code
G_C_solved, = sympy.solve(desired_Y_over_Y_sp - actual_Y_over_Y_sp, G_C)
G_C_solved
###Output
_____no_output_____
###Markdown
This transfer function has $e^{\phi s}$ in both the numerator and the denominator. - The exponential in the numerator simply means a time shift (dead time). - The denominator is approximated by a first-order Taylor series expansion.
###Code
denom = sympy.denom(G_C_solved)
G_C_rational = G_C_solved*denom/denom.subs(sympy.exp(phi*s), 1 + phi*s)
G_C_rational
###Output
_____no_output_____
###Markdown
We define the form of the PID compensator $$K_C\left ( 1 + \frac{1}{\tau_I s} + \tau_D s \right )$$
###Code
K_C, tau_I, tau_D = sympy.symbols('K_C, tau_I, tau_D', positive=True, nonzero=True)
PID = K_C*(1 + 1/(tau_I*s) + tau_D*s)
PID.expand().together()
###Output
_____no_output_____
###Markdown
The form above, however, is not realizable because of the derivative action. For this reason, it is common to use the form:$$K_C\left ( 1 + \frac{1}{\tau_I s}+ \frac{\tau_D s}{\alpha \tau_D s+1} \right )$$
###Code
alpha = sympy.symbols('alpha')
ISA = K_C*(1 + 1/(tau_I*s) + tau_D*s/(alpha*tau_D*s + 1))
ISA.expand().together().simplify()
###Output
_____no_output_____
###Markdown
The goal is to find the compensator parameters that satisfy the rational expression for $G_C$
###Code
G_C_rational
###Output
_____no_output_____
###Markdown
A process model $G$ is required. In this example a second-order model with dead time will be used.
###Code
K, tau, zeta, phi,= sympy.symbols('K, tau, zeta, phi', positive=True)
G2 = K*sympy.exp(phi*s)/((tau*s)**2 + 2*tau*zeta*s + 1)
G2
###Output
_____no_output_____
###Markdown
The system model is substituted into the compensator expression.
###Code
target_G_C = G_C_rational.subs(GPtilde, G2).expand().together()
target_G_C
###Output
_____no_output_____
###Markdown
We look for the PID parameters that reproduce the compensator solved for above.
###Code
# difference between the two compensator expressions
zeroeq = (target_G_C - PID).simplify()
zeroeq
numer, denom = zeroeq.as_numer_denom()
eq = sympy.poly(numer, s)
eqs = eq.coeffs()
sympy.solve(eqs, [K_C, tau_D, tau_I])
###Output
_____no_output_____
###Markdown
The solution obtained shows:$$K_C = -\frac{2\tau \zeta}{K(\phi-\tau_C)}$$$$\tau_D = \frac{\tau}{2\zeta}$$$$\tau_I = 2\tau\zeta$$The compensator parameters are completely defined from an approximate model of the process$$\frac{K}{(\tau s)^2 + 2\tau \zeta s + 1} \cdot e^{s\phi}$$and a desired response$$\frac{e^{\phi s}}{\tau_c s + 1}$$ **Example** Tune a PID controller for a system that was excited with a unit step and whose response was recorded.
###Code
import control
import pandas as pd
GP = control.tf([1, 2], [2, 3, 4, 1])
GP
df = pd.read_csv('Datos\DatoEscalon.csv')
df.head()
ym = df.Y
ts = df.tiempo
interact(resultplot2, K=(0.0, 10.0), tau=(0., 10.),
theta=(0., 10.), zeta = (0, 10.0), y0=(0., 10.));
import scipy.optimize
p_inicial = [2, 1, 1.1, 0.5, 4.9]
[K_1, tau_1, zeta_1, theta_1, y0_1], pcov = scipy.optimize.curve_fit(sopdt, ts, ym, p_inicial)
[K_1, tau_1, zeta_1, theta_1, y0_1]
###Output
C:\Users\Usuario\.conda\envs\SistDin\lib\site-packages\pandas\core\arraylike.py:364: RuntimeWarning: overflow encountered in cosh
result = getattr(ufunc, method)(*inputs, **kwargs)
C:\Users\Usuario\.conda\envs\SistDin\lib\site-packages\pandas\core\arraylike.py:364: RuntimeWarning: overflow encountered in sinh
result = getattr(ufunc, method)(*inputs, **kwargs)
###Markdown
The desired behavior is defined.
###Code
tauC = 1.2
###Output
_____no_output_____
###Markdown
The PID parameters are calculated
###Code
sympy.solve(eqs, [K_C, tau_D, tau_I])
KC1 = -2*tau_1*zeta_1/(K_1*(-theta_1 - tauC))
TD1 = tau_1/(2*zeta_1)
TI1 = 2*tau_1*zeta_1
[KC1,TI1,TD1]
PID1 = KC1*(1 + 1/(TI1*s) + TD1*s)
PID1.expand().together()
GC_PID1 = KC1*(1 + control.tf(1,[TI1,0]) + control.tf([TD1,0],1))
GC_PID1
_, yLA = control.step_response(GP,ts)
_, yLC1 = control.step_response(control.feedback(GP,1),ts)
_, yLCPID1 = control.step_response(control.feedback(GC_PID1*GP,1),ts)
plt.plot(ts,yLA,
ts,yLC1,
ts,yLCPID1)
plt.legend(('Open loop','Unity feedback','Direct-synthesis PID'))
plt.grid()
###Output
_____no_output_____
###Markdown
Note that oscillations appear even though the desired response was modeled as a first-order system with dead time. - What causes this discrepancy? - What weak points might this methodology have? ------ Tuning rules [Ziegler and Nichols](https://controlautomaticoeducacion.com/control-realimentado/ziegler-nichols-sintonia-de-control-pid/) proposed methods for tuning a PID controller. Open loop The method is intended for systems whose response to a unit step has an $S$ shape, that is, with no overshoot and no unstable dynamics. In this case, the system can be approximated by a first-order-plus-dead-time model:$$\frac{K}{\tau s + 1}e^{-Ls}$$as shown in the figure. From this model, the following compensator settings are suggested:
| Controller | $K_C$ | $\tau_I$ | $\tau_D$ |
|-------------|---------------------|----------------|----------|
| P |$\frac{\tau}{KL}$ | $\infty$ | $0$ |
| PI |$0.9\frac{\tau}{KL}$ |$\frac{L}{0.3}$ | $0$ |
| PID |$1.2\frac{\tau}{KL}$ |$2L$ | $0.5L$ |
###Code
ym = df.Y
ts = df.tiempo
interact(resultplot1, K=(0.0, 10.0), tau=(0., 10.),
theta=(0., 10.), y0=(0., 10.));
p_inicial = [1.8, 2.0, 1.0, 5.0]
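# Fit a first-order-plus-dead-time model (the fopdt() helper defined earlier in the
# notebook) to the same step-response data, which gives K, tau and the dead time.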
[K_2, tau_2, theta_2, y0_2], pcov = scipy.optimize.curve_fit(fopdt, ts, ym, p_inicial)
[K_2, tau_2, theta_2, y0_2]
###Output
_____no_output_____
###Markdown
From the fitted model, the PID parameters are computed.$$K_C = 1.2 \frac{\tau}{KL}$$$$\tau_I = 2 L$$$$\tau_D = 0.5 L$$
###Code
KC2 = 1.2*tau_2/(K_2*theta_2)
TI2 = 2*theta_2
TD2 = 0.5*theta_2
[KC2,TI2,TD2]
GC_PID2 = KC2*(1 + control.tf(1,[TI2,0]) + control.tf([TD2,0],[0.001*TD2,1]))
GC_PID2
_, yLCPID2 = control.step_response(control.feedback(GC_PID2*GP,1),ts)
plt.plot(ts,yLA,
ts,yLC1,
ts,yLCPID1,
ts,yLCPID2)
plt.legend(('Open loop','Unity feedback','Direct PID','Z-N open loop'))
plt.grid()
###Output
_____no_output_____
###Markdown
Closed loopThe method is intended for systems that can be brought to sustained oscillation when the loop is closed. The loop is closed with the derivative and integral actions removed, that is, setting $$\tau_I = \infty$$ $$\tau_D = 0$$ The proportional gain is then varied until the closed-loop response is an oscillation of constant amplitude. - The compensator gain that achieves this response is known as the ultimate (critical) gain $K_u$.- The period of that oscillation is the ultimate (critical) period $P_u$.With these values the PID parameters are found| Controller | $K_p$ | $\tau_I$ | $\tau_d$ ||---------------|-----------|-------------------|---------------|| P | $0.5K_u$ | $\infty$ | $0$ || PI | $0.45K_u$ |$\frac{1}{1.2}P_u$ | $0 $ || PID | $0.6K_u$ | $0.5P_u$ | $0.125P_u$ |
###Code
GP
def resultplotKfeed(K):
plt.figure(figsize=(21, 7))
ts = np.linspace(0,5,1000)
_, yLCPIDk = control.step_response(control.feedback(K*GP,1),ts)
plt.plot(ts,yLCPIDk)
plt.grid(True)
interact(resultplotKfeed, K=(0.0, 20.0))
Ku = 9.4
Pu = 2.4
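# Cross-check (a hedged sketch, not part of the original notebook): for a known GP the
# ultimate gain and period can also be read from the frequency response, since the gain
# margin of GP equals Ku and the phase-crossover frequency wcg gives Pu = 2*pi/wcg.
gm, pm, wcg, wcp = control.margin(GP)
print([gm, 2*np.pi/wcg])  # compare against the manually found Ku and Pu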
KC3 = 0.6*Ku
TI3 = 0.5*Pu
TD3 = 0.125*Pu
GC_PID3 = KC3*(1 + control.tf(1,[TI3,0]) + control.tf([TD3,0],1))
GC_PID3
_, yLCPID3 = control.step_response(control.feedback(GC_PID3*GP,1),ts)
plt.figure(figsize=(14, 7))
plt.plot(ts,yLA,
ts,yLC1,
ts,yLCPID1,
ts,yLCPID2,
ts,yLCPID3)
plt.legend(('Open loop','Unity feedback','Direct PID','Z-N open loop','Z-N closed loop'))
plt.grid()
###Output
_____no_output_____
###Markdown
- What is the closed-loop transfer function from the reference to the control signal? $$\frac{Y}{Y_{sp}} = \frac{G_C G_P}{1+G_C G_P}$$$$E = Y_{sp}-Y$$$$E + Y = Y_{sp}$$$$Y = Y_{sp}- E$$$$Y = Y_{sp}- \frac{U}{G_C}$$$$U = E G_C$$$$Y = U G_P$$$$Y_{sp}- \frac{U}{G_C} = U G_P$$$$Y_{sp} = U G_P + \frac{U}{G_C}$$$$Y_{sp} = U (G_P + \frac{1}{G_C})$$$$Y_{sp} = U (\frac{G_C G_P}{G_C} + \frac{1}{G_C})$$$$Y_{sp} = U (\frac{G_C G_P + 1}{G_C})$$$$\frac{Y_{sp}}{U} = (\frac{G_C G_P + 1}{G_C})$$
###Code
GC_PID2/(1+GP*GC_PID2)
###Output
_____no_output_____
###Markdown
- How do the different controllers behave in response to a step input?
###Code
# required code
_, uLCPID1 = control.step_response(control.feedback(sys1=GC_PID2,
sys2=GP),ts)
plt.figure(figsize=(14, 7))
plt.plot(ts,uLCPID1)
plt.legend(('Direct PID',))
plt.grid()
###Output
_____no_output_____
###Markdown
Tuning based on integral criteriaThe behavior of the control loops can be evaluated through the error signal $e(t) = ref(t) - Y(t)$. Below are some error-based indices.| Index | Definition ||------------------------------------------------------|------------------------------|| IAE: integral of the absolute error | $$\int_0^\infty |e(t)|dt$$ || ISE: integral of the squared error | $$\int_0^\infty e(t)^2dt$$ || ITAE: integral of time times the absolute error | $$\int_0^\infty t|e(t)|dt$$|One design technique consists of finding the parameters of a PID compensator that minimize one of these indices, based on a model of the system. You can explore more [here](https://www.researchgate.net/publication/260058488_METODOS_DE_SINTONIZACION_DE_CONTROLADORES_PID_QUE_OPERAN_COMO_SERVOMECANISMOS), [here](https://www.researchgate.net/publication/260058590_Optimizacion_del_desempeno_de_los_reguladores_y_servomecanismos_PID) and [here](https://www.researchgate.net/publication/255641510_SINTONIZACION_DE_CONTROLADORES_PI_Y_PID_UTILIZANDO_LOS_CRITERIOS_INTEGRALES_IAE_E_ITAE) - Find and simulate a PID controller that minimizes the ITAE index for the system (a sketch is given in the code cell below)
###Code
GP
# required code
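# A possible approach (a hedged sketch, not the official solution): numerically search
# for PID parameters that minimize the ITAE index of the closed loop. The names below
# (itae_cost, KC_itae, ...) are illustrative and not part of the original notebook.
def itae_cost(params, GP, ts):
    Kc, Ti, Td = params
    Ti, Td = max(Ti, 1e-3), max(Td, 1e-3)     # keep the controller well-posed
    # PID with a small derivative filter so the controller transfer function stays proper
    Gc = Kc*(1 + control.tf(1, [Ti, 0]) + control.tf([Td, 0], [0.001*Td, 1]))
    _, y = control.step_response(control.feedback(Gc*GP, 1), ts)
    e = 1.0 - y                               # error against a unit-step reference
    if not np.all(np.isfinite(e)):
        return 1e9                            # penalize unstable candidates
    return np.trapz(ts*np.abs(e), ts)         # ITAE = integral of t*|e(t)| dt

res = scipy.optimize.minimize(itae_cost, x0=[2.0, 2.0, 0.5], args=(GP, ts),
                              method='Nelder-Mead')
KC_itae, TI_itae, TD_itae = res.x
[KC_itae, TI_itae, TD_itae]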
###Output
_____no_output_____
###Markdown
- How do the different controllers behave in response to a step input? (A sketch follows in the code cell below.)
###Code
# required code
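# A possible sketch (not the official solution): compare the control signals u(t) of the
# tuned controllers for a unit-step reference, using U/Ysp = GC/(1 + GC*GP) derived above.
# A small derivative filter is added (as was done for GC_PID2) so every controller
# transfer function is proper and can be simulated; filtered_pid is an illustrative helper.
def filtered_pid(Kc, Ti, Td):
    return Kc*(1 + control.tf(1, [Ti, 0]) + control.tf([Td, 0], [0.001*Td, 1]))

controllers = [('Direct PID', filtered_pid(KC1, TI1, TD1)),
               ('Z-N open loop', filtered_pid(KC2, TI2, TD2)),
               ('Z-N closed loop', filtered_pid(KC3, TI3, TD3))]
plt.figure(figsize=(14, 7))
for name, GC in controllers:
    _, u = control.step_response(control.feedback(GC, GP), ts)
    plt.plot(ts, u, label=name)
plt.legend()
plt.grid()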
###Output
_____no_output_____ |
01/chapter2/06_example.ipynb | ###Markdown
---- 6. Building a simple household account book program
###Code
class AccountBook:
def __init__(self):
self.total_price = 0
self.year_price_dict = {}
self.month_price_dict = {}
self.day_price_dict = {}
def write_account(self, year, month, day, price):
self.total_price = self.total_price + price
self.write_year_account(year, price)
self.write_month_account(month, price)
self.write_day_account(day, price)
def write_year_account(self, year, price):
if year in self.year_price_dict:
self.year_price_dict[year] = self.year_price_dict[year] + price
else:
self.year_price_dict[year] = price
def write_month_account(self, month, price):
if month in self.month_price_dict:
self.month_price_dict[month] = self.month_price_dict[month] + price
else:
self.month_price_dict[month] = price
def write_day_account(self, day, price):
if day in self.day_price_dict:
self.day_price_dict[day] = self.day_price_dict[day] + price
else:
self.day_price_dict[day] = price
def print_year_account(self, year):
if year in self.year_price_dict:
print("total", year, "year account:", self.year_price_dict[year])
else:
print("total", year, "year account:", 0)
def print_month_account(self, month):
if month in self.month_price_dict:
print("total", month, "month account:", self.month_price_dict[month])
else:
print("total", month, "month account:", 0)
def print_day_account(self, day):
if day in self.day_price_dict:
print("total", day, "day account:", self.day_price_dict[day])
else:
print("total", day, "day account:", 0)
def get_account_total_price(self):
return self.total_price
account_book1 = AccountBook()
account_book1.write_account(1992, 6, 13, 50000)
account_book1.write_account(1998, 6, 18, 15000)
account_book1.write_account(1994, 8, 11, 310000)
account_book1.write_account(1992, 9, 18, 500)
account_book1.write_account(1994, 10, 13, 21800)
account_book1.write_account(1998, 8, 21, 289100)
account_book1.write_account(2000, 6, 13, 13000)
account_book1.print_year_account(1994)
account_book1.print_year_account(1992)
account_book1.print_year_account(2000)
account_book1.print_year_account(2001)
account_book1.print_month_account(6)
account_book1.print_month_account(8)
account_book1.print_day_account(13)
print(account_book1.get_account_total_price())
###Output
699400
|
imdb/imdbtrain.ipynb | ###Markdown
Sequence classification model for IMDB Sentiment Analysis(c) Deniz Yuret, 2018 * Objectives: Learn the structure of the IMDB dataset and train a simple RNN model.* Prerequisites: RNN models (06.rnn.ipynb), param, GRU, nll, minibatch, accuracy, Adam, train!* Knet: dir (used by imdb.jl)
###Code
using Pkg
for p in ("Knet","ProgressMeter")
haskey(Pkg.installed(),p) || Pkg.add(p)
end
EPOCHS=3 # Number of training epochs
BATCHSIZE=64 # Number of instances in a minibatch
EMBEDSIZE=125 # Word embedding size
NUMHIDDEN=100 # Hidden layer size
MAXLEN=150 # maximum size of the word sequence, pad shorter sequences, truncate longer ones
VOCABSIZE=30000 # maximum vocabulary size, keep the most frequent 30K, map the rest to UNK token
NUMCLASS=2 # number of output classes
DROPOUT=0.0 # Dropout rate
LR=0.001 # Learning rate
BETA_1=0.9 # Adam optimization parameter
BETA_2=0.999 # Adam optimization parameter
EPS=1e-08 # Adam optimization parameter
###Output
_____no_output_____
###Markdown
Load and view data
###Code
using Knet: Knet
ENV["COLUMNS"]=92 # column width for array printing
include(Knet.dir("data","imdb.jl")) # defines imdb loader
@doc imdb
@time (xtrn,ytrn,xtst,ytst,imdbdict)=imdb(maxlen=MAXLEN,maxval=VOCABSIZE);
summary.((xtrn,ytrn,xtst,ytst,imdbdict))
# Words are encoded with integers
rand(xtrn)'
# Each word sequence is padded or truncated to length 150
length.(xtrn)'
# Define a function that can print the actual words:
imdbvocab = Array{String}(undef,length(imdbdict))
for (k,v) in imdbdict; imdbvocab[v]=k; end
imdbvocab[VOCABSIZE-2:VOCABSIZE] = ["<unk>","<s>","<pad>"]
function reviewstring(x,y=0)
x = x[x.!=VOCABSIZE] # remove pads
"""$(("Sample","Negative","Positive")[y+1]) review:\n$(join(imdbvocab[x]," "))"""
end
# Hit Ctrl-Enter to see random reviews:
r = rand(1:length(xtrn))
println(reviewstring(xtrn[r],ytrn[r]))
# Here are the labels: 1=negative, 2=positive
ytrn'
###Output
_____no_output_____
###Markdown
Define the model
###Code
using Knet: param, dropout, RNN
struct SequenceClassifier; input; rnn; output; end
SequenceClassifier(input::Int, embed::Int, hidden::Int, output::Int) =
SequenceClassifier(param(embed,input), RNN(embed,hidden,rnnType=:gru), param(output,hidden))
function (sc::SequenceClassifier)(input; pdrop=0)
embed = sc.input[:, permutedims(hcat(input...))]
embed = dropout(embed,pdrop)
hidden = sc.rnn(embed)
hidden = dropout(hidden,pdrop)
return sc.output * hidden[:,:,end]
end
###Output
_____no_output_____
###Markdown
Experiment
###Code
using Knet: minibatch
dtrn = minibatch(xtrn,ytrn,BATCHSIZE;shuffle=true)
dtst = minibatch(xtst,ytst,BATCHSIZE)
length.((dtrn,dtst))
# For running experiments
using Knet: train!, Adam
import ProgressMeter
function trainresults(file,model)
if (print("Train from scratch? ");readline()[1]=='y')
updates = 0; prog = ProgressMeter.Progress(EPOCHS * length(dtrn))
function callback(J)
ProgressMeter.update!(prog, updates)
return (updates += 1) <= prog.n
end
opt = Adam(lr=LR, beta1=BETA_1, beta2=BETA_2, eps=EPS)
train!(model, dtrn; callback=callback, optimizer=opt, pdrop=DROPOUT)
Knet.gc()
Knet.save(file,"model",model)
else
isfile(file) || download("http://people.csail.mit.edu/deniz/models/tutorial/$file",file)
model = Knet.load(file,"model")
end
return model
end
using Knet: nll, accuracy
model = SequenceClassifier(VOCABSIZE,EMBEDSIZE,NUMHIDDEN,NUMCLASS)
nll(model,dtrn), nll(model,dtst), accuracy(model,dtrn), accuracy(model,dtst)
model = trainresults("imdbmodel.jld2",model);
# 33s (0.059155148f0, 0.3877507f0, 0.9846153846153847, 0.8583733974358975)
nll(model,dtrn), nll(model,dtst), accuracy(model,dtrn), accuracy(model,dtst)
###Output
_____no_output_____
###Markdown
Playground
###Code
predictstring(x)="\nPrediction: " * ("Negative","Positive")[argmax(Array(vec(model([x]))))]
UNK = VOCABSIZE-2
str2ids(s::String)=[(i=get(imdbdict,w,UNK); i>=UNK ? UNK : i) for w in split(lowercase(s))]
# Here we can see predictions for random reviews from the test set; hit Ctrl-Enter to sample:
r = rand(1:length(xtst))
println(reviewstring(xtst[r],ytst[r]))
println(predictstring(xtst[r]))
# Here the user can enter their own reviews and classify them:
println(predictstring(str2ids(readline(stdin))))
###Output
stdin> no
Prediction: Negative
|
demographic data analyzer/demographic_data_analyzer.ipynb | ###Markdown
**Demographic Data Analyzer**
###Code
import pandas as pd
# Read data from file
df = pd.read_csv("adult.data.csv")
df.head()
df.describe()
df.shape
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 32561 entries, 0 to 32560
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 32561 non-null int64
1 workclass 32561 non-null object
2 fnlwgt 32561 non-null int64
3 education 32561 non-null object
4 education-num 32561 non-null int64
5 marital-status 32561 non-null object
6 occupation 32561 non-null object
7 relationship 32561 non-null object
8 race 32561 non-null object
9 sex 32561 non-null object
10 capital-gain 32561 non-null int64
11 capital-loss 32561 non-null int64
12 hours-per-week 32561 non-null int64
13 native-country 32561 non-null object
14 salary 32561 non-null object
dtypes: int64(6), object(9)
memory usage: 3.7+ MB
###Markdown
How many of each race are represented in this dataset? This should be a Pandas series with race names as the index labels.
###Code
race_count = df["race"].value_counts()
print(race_count)
###Output
White 27816
Black 3124
Asian-Pac-Islander 1039
Amer-Indian-Eskimo 311
Other 271
Name: race, dtype: int64
###Markdown
What is the average age of men?
###Code
male = df[df["sex"]=="Male"]
# print(male)
average_age_man = round(male["age"].mean(),ndigits=1)
print(f"{average_age_man} years")
###Output
39.4 years
###Markdown
What is the percentage of people who have a Bachelor's degree?
###Code
total_bachelor_degree = df[df["education"]=="Bachelors"]["education"].count()
# print(total_bachelor_degree)
total_degree = df["education"].count()
# print(total_degree)
percentage_bachelors = round(100*total_bachelor_degree/total_degree, ndigits=1)
print(f"{percentage_bachelors}%")
###Output
16.4%
###Markdown
What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?What percentage of people without advanced education make more than 50K?with and without `Bachelors`, `Masters`, or `Doctorate`
###Code
df["education"].value_counts()
higher_education = df[df["education"].isin(["Bachelors","Masters","Doctorate"])]
# print(higher_education["education"].value_counts())
lower_education = df[~df["education"].isin(["Bachelors","Masters","Doctorate"])]
# print(lower_education["education"].value_counts())
# percentage with salary>50K
# print(higher_education["salary"].value_counts())
higher_education_rich = round(100*higher_education[higher_education["salary"]==">50K"]["salary"].count()/higher_education["salary"].count(), ndigits=1)
print(f"{higher_education_rich}%")
lower_education_rich = round(100*lower_education[lower_education["salary"]==">50K"]["salary"].count()/lower_education["salary"].count(), ndigits=1)
print(f"{lower_education_rich}%")
###Output
46.5%
17.4%
###Markdown
What is the minimum number of hours a person works per week (hours-per-week feature)?
###Code
# print(df["hours-per-week"].value_counts())
min_work_hours = df["hours-per-week"].min()
print(f"{min_work_hours} hours")
###Output
1 hours
###Markdown
What percentage of the people who work the minimum number of hours per week have a salary of >50K?
###Code
num_min_workers = df[df["hours-per-week"]==df["hours-per-week"].min()]["hours-per-week"].count()
print(f"{num_min_workers} people work for minimum hours in a week")
min_workers = df[df["hours-per-week"]==df["hours-per-week"].min()]
min_workers_rich = min_workers[min_workers["salary"]==">50K"]["salary"].count()
print(f"{min_workers_rich} people work for minimun hours but earn more than 50K")
rich_percentage = round(100*min_workers_rich/num_min_workers, ndigits=1)
print(f"{rich_percentage}% people earns highly by working minimum number of hours in a week")
###Output
20 people work the minimum number of hours per week
2 of them earn more than 50K
10.0% of the people who work the minimum number of hours per week earn more than 50K
###Markdown
Which country has the highest percentage of people that earn >50K?
###Code
higher_earning_ratio_country = (df[df["salary"]==">50K"]["native-country"].value_counts()/df["native-country"].value_counts()).sort_values(ascending=False)
print(higher_earning_ratio_country)
highest_earning_country = (df[df["salary"]==">50K"]["native-country"].value_counts()/df["native-country"].value_counts()).idxmax()
print(f"{highest_earning_country} has the highest percentage of people that earn more than 50K")
highest_earning_country_percentage = round(100*(df[df["salary"]==">50K"]["native-country"].value_counts()/df["native-country"].value_counts()).max(), ndigits=1)
print(f"{highest_earning_country_percentage}% people of {highest_earning_country} earn more than 50K")
###Output
41.9% people of Iran earn more than 50K
###Markdown
Identify the most popular occupation for those who earn >50K in India.
###Code
earn_rich = df[df["salary"]==">50K"]
earn_rich_INDIA = earn_rich[earn_rich["native-country"]=="India"]
top_INDIA_occupation_list = (earn_rich_INDIA["occupation"].value_counts()).sort_values(ascending=False)
print(top_INDIA_occupation_list)
top_INDIA_occupation = top_INDIA_occupation_list.index[0]
print(f"{top_INDIA_occupation} is the most popular occupation among high earning people in INDIA")
###Output
Prof-specialty 25
Exec-managerial 8
Other-service 2
Tech-support 2
Transport-moving 1
Sales 1
Adm-clerical 1
Name: occupation, dtype: int64
Prof-specialty is the most popular occupation among high earning people in INDIA
|
PySpark/sparkRDD_Transformations.ipynb | ###Markdown
***SPARK RDD TRANSFORMATIONS*** **map(func)**
###Code
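# Note: these cells assume an active SparkContext named `sc` (as provided by a
# PySpark shell or a Databricks notebook). A minimal local setup could be:
from pyspark import SparkContext
sc = SparkContext.getOrCreate()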
# Return a new distributed dataset formed by passing each element of the source through a function func.
data = [1,2,3,4,5]
source_RDD=sc.parallelize(data,4)
result_RDD = source_RDD.map(lambda x:x**2)
print('Source RDD=',source_RDD.collect())
print('Result RDD after map = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**filter(func)**
###Code
# Return a new dataset formed by selecting those elements of the source on which func returns true.
data = [1,2,3,4,5]
source_RDD=sc.parallelize(data,4)
result_RDD = source_RDD.filter(lambda x:x%2==0) # lambda function returns true when the element is even
print('Source RDD=',source_RDD.collect())
print('Result RDD after filter = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**flatMap(func)**
###Code
# Similar to map, but each input item can be mapped to 0 or more output items (so func should return a Seq rather than a single item).
data = [1,2,3,4,5]
source_RDD=sc.parallelize(data,4)
result_RDD1 = source_RDD.flatMap(lambda x:(x,x))
result_RDD2 = source_RDD.flatMap(lambda x:[(x,x**2)])
result_RDD3 = source_RDD.flatMap(lambda x:range(1,x))
print('Source RDD=',source_RDD.collect())
print('Result RDD1 after flatMap = ',result_RDD1.collect())
print('Result RDD2 after flatMap = ',result_RDD2.collect())
print('Result RDD3 after flatMap = ',result_RDD3.collect())
###Output
_____no_output_____
###Markdown
**mapPartitions(func)**
###Code
# Similar to map, but runs separately on each partition (block) of the RDD, so func must be of type Iterator<T> => Iterator<U> when running on an RDD of type T.
def process_partition(partitions):
yield sum(partitions)
data = [1,2,3,3,2,5,2,4,3]
source_RDD = sc.parallelize(data,3)
print('Source RDD=',source_RDD.collect())
print('Source RDD at partition level=',source_RDD.glom().collect())
result_RDD = source_RDD.mapPartitions(process_partition)
print('Resultant RDD after mapPartitions=',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**mapPartitionsWithIndex(func)**
###Code
# Similar to mapPartitions, but also provides func with an integer value representing the index of the partition, so func must be of type (Int, Iterator<T>) => Iterator<U> when running on an RDD of type T.
def process_partition(index,iterator):
yield (index,sum(iterator))
data = [1,2,3,3,2,5,2,4,3]
source_RDD = sc.parallelize(data,3)
print('Source RDD=',source_RDD.collect())
print('Source RDD at partition level=',source_RDD.glom().collect())
result_RDD = source_RDD.mapPartitionsWithIndex(process_partition)
print('Resultant RDD after mapPartitionsWithIndex=',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**sample(withReplacement, fraction, seed)**
###Code
# Sample a fraction fraction of the data, with or without replacement, using a given random number generator seed.
data = range(1,101)
source_RDD=sc.parallelize(data)
result_RDD = source_RDD.sample(False,0.1,1)
print('Source RDD=',source_RDD.collect())
print('Result RDD after sample = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**union(otherDataset)**
###Code
# Return a new dataset that contains the union of the elements in the source dataset and the argument.
data=[1,2,3]
data2 = [2,4,5]
source_RDD = sc.parallelize(data)
argument_RDD = sc.parallelize(data2)
result_RDD = source_RDD.union(argument_RDD)
print('Source RDD=',source_RDD.collect())
print('Another RDD=',argument_RDD.collect())
print('Result RDD after union = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**intersection(otherDataset)**
###Code
# Return a new RDD that contains the intersection of elements in the source dataset and the argument.
data=[1,2,3]
data2 = [2,4,5]
source_RDD = sc.parallelize(data)
argument_RDD = sc.parallelize(data2)
result_RDD = source_RDD.intersection(argument_RDD)
print('Source RDD=',source_RDD.collect())
print('Another RDD=',argument_RDD.collect())
print('Result RDD after intersection = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**distinct([numPartitions])**
###Code
# Return a new dataset that contains the distinct elements of the source dataset.
data=[1,2,2,3,5,5]
source_RDD = sc.parallelize(data)
result_RDD = source_RDD.distinct(2)
print('Source RDD=',source_RDD.collect())
print('Result RDD after distinct = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**groupByKey([numPartitions])**
###Code
# When called on a dataset of (K, V) pairs, returns a dataset of (K, Iterable<V>) pairs.
# Note: If you are grouping in order to perform an aggregation (such as a sum or average) over each key, using reduceByKey or aggregateByKey will yield much better performance.
# Note: By default, the level of parallelism in the output depends on the number of partitions of the parent RDD. You can pass an optional numPartitions argument to set a different number of tasks.
data=[(1,'A'),(2,'B'),(3,'C'),(4,'D'),(1,'D'),(4,'H'),(1,'Z'),(3,'O'),(3,'P')]
source_RDD = sc.parallelize(data)
result_RDD = source_RDD.groupByKey(2)
print('Source RDD=',source_RDD.collect())
print('Result RDD after groupByKey = ',result_RDD.collect())
# To see the Iterable in the above cell
# We can turn the results of groupByKey into a list by calling list()
result_RDD=source_RDD.groupByKey().map(lambda x : (x[0], list(x[1]))).collect()
print('Result RDD after groupByKey with map = ',result_RDD)
###Output
_____no_output_____
###Markdown
**reduceByKey(func, [numPartitions])**
###Code
# When called on a dataset of (K, V) pairs, returns a dataset of (K, V) pairs where the values for each key are aggregated using the given reduce function func, which must be of type (V,V) => V. Like in groupByKey, the number of reduce tasks is configurable through an optional second argument.
data=[(1,'A'),(2,'B'),(3,'C'),(4,'D'),(1,'D'),(4,'H'),(1,'Z'),(3,'O'),(3,'P')]
source_RDD = sc.parallelize(data)
result_RDD = source_RDD.reduceByKey(lambda a,b:a+b) # Using lambda function to concatenate the string based on keys
print('Source RDD=',source_RDD.collect())
print('Result RDD after reduceByKey = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**aggregateByKey(zeroValue, seqFunc, combFunc)**
###Code
# When called on a dataset of (K, V) pairs, returns a dataset of (K, U) pairs where the values for each key are aggregated using the given combine functions and a neutral "zero" value. Allows an aggregated value type that is different than the input value type, while avoiding unnecessary allocations. Like in groupByKey, the number of reduce tasks is configurable through an optional second argument.
data=[('A',1),('B',10),('C',3),('A',4),('C',3),('C',5)]
source_RDD = sc.parallelize(data)
seqFunc = (lambda x,y: (x[0]+y,x[1]+1))
combFunc = (lambda rdd1,rdd2: (rdd1[0]+rdd2[0],rdd1[1]+rdd2[1]))
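# seqFunc folds each value into a per-partition accumulator (running_sum, running_count);
# combFunc then merges the per-partition (sum, count) accumulators across partitions.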
result_RDD = source_RDD.aggregateByKey((0, 0),seqFunc,combFunc)
print('Source RDD=',source_RDD.collect())
print('Source RDD at partition level=',source_RDD.glom().collect())
print('Result RDD after aggregateByKey = ',result_RDD.collect()) # returns the sum of values of each key and the number of occurance of each key
###Output
_____no_output_____
###Markdown
**sortByKey([ascending], [numPartitions])**
###Code
# When called on a dataset of (K, V) pairs where K implements Ordered, returns a dataset of (K, V) pairs sorted by keys in ascending or descending order, as specified in the boolean ascending argument.
data=[(1,'A'),(2,'B'),(3,'C'),(4,'D'),(1,'D'),(4,'H'),(1,'Z'),(3,'O'),(3,'P')]
source_RDD = sc.parallelize(data)
result_RDD = source_RDD.sortByKey()
print('Source RDD=',source_RDD.collect())
print('Result RDD after sortByKey = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**join(otherDataset, [numPartitions])**
###Code
# When called on datasets of type (K, V) and (K, W), returns a dataset of (K, (V, W)) pairs with all pairs of elements for each key. Outer joins are supported through leftOuterJoin, rightOuterJoin, and fullOuterJoin.
data1=[(1,'A'),(2,'B'),(3,'C')]
data2=[(1,'F'),(3,'L'),(4,'M')]
source_RDD1 = sc.parallelize(data1)
source_RDD2 = sc.parallelize(data2)
result_RDD = source_RDD1.join(source_RDD2)
print('Source RDD 1=',source_RDD1.collect())
print('Source RDD 2=',source_RDD2.collect())
print('Result RDD after join = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**cogroup(otherDataset, [numPartitions])**
###Code
# When called on datasets of type (K, V) and (K, W), returns a dataset of (K, (Iterable<V>, Iterable<W>)) tuples. This operation is also called groupWith.
data1=[(1,'A'),(2,'B'),(3,'C')]
data2=[(1,'F'),(3,'L'),(4,'M')]
source_RDD1 = sc.parallelize(data1)
source_RDD2 = sc.parallelize(data2)
result_RDD = source_RDD1.cogroup(source_RDD2)
print('Source RDD 1=',source_RDD1.collect())
print('Source RDD 2=',source_RDD2.collect())
print('Result RDD after cogroup = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**cartesian(otherDataset)**
###Code
# When called on datasets of types T and U, returns a dataset of (T, U) pairs (all pairs of elements).
source_RDD1 = sc.parallelize([0,1])
source_RDD2 = sc.parallelize([3,4])
result_RDD = source_RDD1.cartesian(source_RDD2)
print('Source RDD 1=',source_RDD1.collect())
print('Source RDD 2=',source_RDD2.collect())
print('Result RDD after cogroup = ',result_RDD.collect())
###Output
_____no_output_____
###Markdown
**pipe(command, [envVars])**
###Code
# Pipe each partition of the RDD through a shell command, e.g. a Perl or bash script. RDD elements are written to the process's stdin and lines output to its stdout are returned as an RDD of strings.
sc.parallelize([1, 2, 3, 4]).pipe('cat').collect()
###Output
_____no_output_____
###Markdown
**coalesce(numPartitions)**
###Code
# Decrease the number of partitions in the RDD to numPartitions. Useful for running operations more efficiently after filtering down a large dataset.
data = [1,2,3,4]
source_RDD = sc.parallelize(data,4)
result_RDD = source_RDD.coalesce(2)
print("No. of Partitions before coalesce: " ,source_RDD.getNumPartitions())
print("Partitions before coalesce: " ,source_RDD.glom().collect())
print("No. of Partitions after coalesce: " ,result_RDD.getNumPartitions())
print("Partitions after coalesce: " ,result_RDD.glom().collect())
###Output
_____no_output_____
###Markdown
**repartition(numPartitions)**
###Code
# Reshuffle the data in the RDD randomly to create either more or fewer partitions and balance it across them. This always shuffles all data over the network.
data = [1,2,3,4]
source_RDD = sc.parallelize(data,4)
result_RDD = source_RDD.repartition(2)
print("No. of Partitions before repartition: " ,source_RDD.getNumPartitions())
print("Partitions before repartition: " ,source_RDD.glom().collect())
print("No. of Partitions after repartition: " ,result_RDD.getNumPartitions())
print("Partitions after repartition: " ,result_RDD.glom().collect())
###Output
_____no_output_____
###Markdown
**repartitionAndSortWithinPartitions(partitioner)**
###Code
# Repartition the RDD according to the given partitioner and, within each resulting partition, sort records by their keys. This is more efficient than calling repartition and then sorting within each partition because it can push the sorting down into the shuffle machinery.
source_RDD = sc.parallelize([["a",1], ["b",2], ["c",3], ["d",3]])
result_RDD = source_RDD.repartitionAndSortWithinPartitions(2)
print("RDD :", source_RDD.collect())
print("RDD after repartitionAndSortWithinPartitions:", result_RDD.glom().collect())
###Output
_____no_output_____ |
models/keras_fn/focal_loss_test.ipynb | ###Markdown
Focal loss. gamma_candidates = \[0, 0.5, 1, 2, 5\] BinaryFocalLoss (binary logits mode): in this mode, the prediction tensor is a rank-2 tensor with two probabilities, such as y_pred = \[0.1, 0.9\] to predict y_true = \[0., 1.\]
###Code
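# Assumed setup (the exact import path for BinaryFocalLoss is an assumption; it is
# expected to come from the repository's Keras focal-loss implementation, e.g.
# something like `from focal_loss import BinaryFocalLoss`). numpy and matplotlib
# are needed for the experiments and plots below.
import numpy as np
import matplotlib.pyplot as plt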
# focal loss parameters
alpha=0.99
gamma=1
fl = BinaryFocalLoss(alpha=alpha, gamma=gamma)
y_pred = [0.1, 0.9]
y_true = [0., 1.]
fl(y_true, y_pred)
# Using 'auto'/'sum_over_batch_size' reduction type.
print(f"fl: {fl(y_true, y_pred).numpy()}")
alpha = 1
gamma = 0
fl_0 = BinaryFocalLoss(alpha=alpha, gamma=0)
fl_0_5 = BinaryFocalLoss(alpha=alpha, gamma=0.5)
fl_1 = BinaryFocalLoss(alpha=alpha, gamma=1)
fl_2 = BinaryFocalLoss(alpha=alpha, gamma=2)
fl_5 = BinaryFocalLoss(alpha=alpha, gamma=5)
###Output
_____no_output_____
###Markdown
class 0 is positive
###Code
# class 0 label, class 1 label
y_true = np.asarray([0., 1.])
p = np.linspace(0.00000001, 0.999, 1000)
num_samples = p.shape[0]
focal_loss = np.zeros((num_samples, 5))
for i, _p in enumerate(p):
y_pred = np.asarray([_p, 1-_p])
focal_loss[i, 0] = fl_0(y_true, y_pred)
focal_loss[i, 1] = fl_0_5(y_true, y_pred)
focal_loss[i, 2] = fl_1(y_true, y_pred)
focal_loss[i, 3] = fl_2(y_true, y_pred)
focal_loss[i, 4] = fl_5(y_true, y_pred)
plt.plot(p, focal_loss[:,0], label="$\gamma=0$")
plt.plot(p, focal_loss[:,1], label="$\gamma=0.5$")
plt.plot(p, focal_loss[:,2], label="$\gamma=1$")
plt.plot(p, focal_loss[:,3], label="$\gamma=2$")
plt.plot(p, focal_loss[:,4], label="$\gamma=5$")
plt.xlabel("p")
plt.ylabel("loss")
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
class 1 is positive
###Code
# class 0 label, class 1 label
y_true = np.asarray([1., 0.])
p = np.linspace(0.00000001, 0.999, 1000)
num_samples = p.shape[0]
focal_loss = np.zeros((num_samples, 5))
for i, _p in enumerate(p):
y_pred = np.asarray([_p, 1-_p])
focal_loss[i, 0] = fl_0(y_true, y_pred)
focal_loss[i, 1] = fl_0_5(y_true, y_pred)
focal_loss[i, 2] = fl_1(y_true, y_pred)
focal_loss[i, 3] = fl_2(y_true, y_pred)
focal_loss[i, 4] = fl_5(y_true, y_pred)
plt.plot(p, focal_loss[:,0], label="$\gamma=0$")
plt.plot(p, focal_loss[:,1], label="$\gamma=0.5$")
plt.plot(p, focal_loss[:,2], label="$\gamma=1$")
plt.plot(p, focal_loss[:,3], label="$\gamma=2$")
plt.plot(p, focal_loss[:,4], label="$\gamma=5$")
plt.xlabel("p")
plt.ylabel("Focal loss")
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
Test alpha $\alpha$alpha=0.25
###Code
alpha = 1.0 # weight for positive class
fl_0 = BinaryFocalLoss(alpha=alpha, gamma=0)
fl_0_5 = BinaryFocalLoss(alpha=alpha, gamma=0.5)
fl_1 = BinaryFocalLoss(alpha=alpha, gamma=1)
fl_2 = BinaryFocalLoss(alpha=alpha, gamma=2)
fl_5 = BinaryFocalLoss(alpha=alpha, gamma=5)
p = np.linspace(0.00000000000000001, 1.0, 1000)
###Output
_____no_output_____
###Markdown
class 0 loss function
###Code
y_true = np.asarray([1., 0.])
num_samples = p.shape[0]
focal_loss = np.zeros((num_samples, 5))
for i, _p in enumerate(p):
y_pred = np.asarray([_p, 1-_p])
focal_loss[i, 0] = fl_0(y_true, y_pred)
focal_loss[i, 1] = fl_0_5(y_true, y_pred)
focal_loss[i, 2] = fl_1(y_true, y_pred)
focal_loss[i, 3] = fl_2(y_true, y_pred)
focal_loss[i, 4] = fl_5(y_true, y_pred)
plt.plot(p, focal_loss[:,0], label="$\gamma=0$")
plt.plot(p, focal_loss[:,1], label="$\gamma=0.5$")
plt.plot(p, focal_loss[:,2], label="$\gamma=1$")
plt.plot(p, focal_loss[:,3], label="$\gamma=2$")
plt.plot(p, focal_loss[:,4], label="$\gamma=5$")
plt.xlabel("p")
plt.ylabel("Focal loss")
plt.title("class 0 loss function")
plt.xlim(0, 1)
plt.ylim(0, 5)
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
class 1 loss function
###Code
y_true = np.asarray([0., 1.])
num_samples = p.shape[0]
focal_loss = np.zeros((num_samples, 5))
for i, _p in enumerate(p):
y_pred = np.asarray([_p, 1-_p])
focal_loss[i, 0] = fl_0(y_true, y_pred)
focal_loss[i, 1] = fl_0_5(y_true, y_pred)
focal_loss[i, 2] = fl_1(y_true, y_pred)
focal_loss[i, 3] = fl_2(y_true, y_pred)
focal_loss[i, 4] = fl_5(y_true, y_pred)
plt.plot(p, focal_loss[:,0], label="$\gamma=0$")
plt.plot(p, focal_loss[:,1], label="$\gamma=0.5$")
plt.plot(p, focal_loss[:,2], label="$\gamma=1$")
plt.plot(p, focal_loss[:,3], label="$\gamma=2$")
plt.plot(p, focal_loss[:,4], label="$\gamma=5$")
plt.xlabel("p")
plt.ylabel("Focal loss")
plt.title("class 1 loss function")
plt.xlim(0, 1)
plt.ylim(0, 5)
plt.legend()
plt.grid()
###Output
_____no_output_____
###Markdown
Test gamma $\gamma$
###Code
gamma = 0.1
fl_0 = BinaryFocalLoss(alpha=0.25, gamma=gamma)
y_true = np.asarray([1., 0.])
y_pred = np.asarray([0., 1.])
fl_0(y_true, y_pred)
###Output
_____no_output_____ |
Finding The Saddest Song from -- Insert The Artist Name Here --.ipynb | ###Markdown
Preparation Initialize the libraries that will be used.
###Code
%matplotlib inline
import pandas as pd
import spotipy
import spotipy.util as util
import matplotlib.pyplot as plt
from IPython.display import Image
from IPython.core.display import HTML
from tqdm import *
###Output
_____no_output_____
###Markdown
Request an access token from Spotify.
###Code
client_id='9ddb0fc5160e4844a7eb0e12d56eebeb'
client_secret='16cc5e6cf72444ff98666ced86ca8860'
token = spotipy.oauth2.SpotifyClientCredentials(client_id=client_id,
client_secret=client_secret)
token = token.get_access_token()
###Output
_____no_output_____
###Markdown
Check the token key to see whether it was obtained successfully.
###Code
token
###Output
_____no_output_____
###Markdown
Getting Audio Data From Spotify Searching for the Artist Name For example, we will search for an artist by name (in this notebook, The National). First, initialize a session using the token key we obtained earlier.
###Code
sp = spotipy.Spotify(auth=token)
artist_name="The National"
results = sp.search(q='artist:' + artist_name, type='artist')
items = results['artists']['items']
print(items[0]['name'])
print(items[0]['uri'])
###Output
The National
spotify:artist:2cCUtGK9sDU2EoElnk0GNB
###Markdown
We save the URI of the artist we just found.
###Code
artist_uri=items[0]['uri']
###Output
_____no_output_____
###Markdown
Retrieving Album Data Next, we will retrieve the data for the albums released by the selected artist.
###Code
albums_api = sp.artist_albums(artist_uri, album_type='album', country='US', limit=20, offset=0)['items']
###Output
_____no_output_____
###Markdown
Check the contents of albums_api.
###Code
albums_api
###Output
_____no_output_____
###Markdown
Since there are too many fields to take in at once, let's just check the number of albums with len().
###Code
len(albums_api)
###Output
_____no_output_____
###Markdown
Check which albums were released by the artist we are analyzing.
###Code
for i in reversed(range(0,len(albums_api))):
print(albums_api[i]['name'])
###Output
The National
Sad Songs for Dirty Lovers
Cherry Tree
Alligator
Boxer
The Virginia EP
High Violet
High Violet (Expanded Edition)
Trouble Will Find Me
###Markdown
Now we will build a DataFrame from the album data we obtained.
###Code
album_uri = [albums_api[i]['uri'] for i in reversed(range(0,len(albums_api)))]
album_name= [albums_api[i]['name'] for i in reversed(range(0,len(albums_api)))]
album_img = [albums_api[i]['images'][1]['url'] for i in reversed(range(0,len(albums_api)))]
album_info = pd.DataFrame(
{'album_name': album_name,
'album_uri': album_uri,
'album_img': album_img
})
album_info
album_info.drop(album_info.index[[2,5,7]], inplace=True)
album_info = album_info.reset_index(drop=True)
album_info
###Output
_____no_output_____
###Markdown
Retrieving Track Data and Audio Features After obtaining the album data, we will use it to look up the tracks of each album.
###Code
for i in tqdm(range(len(album_info))):
    temp=pd.DataFrame(sp.album_tracks(album_info['album_uri'][i])['items'])
    temp=temp.loc[:,['track_number','name','duration_ms','uri','preview_url']]
    temp['album_name']=[album_info['album_name'][i]]*len(temp)
    temp['album_img']=[album_info['album_img'][i]]*len(temp)
    temp['album_uri']=[album_info['album_uri'][i]]*len(temp)
    if i==0:
        track_df=temp
    else:
        track_df=pd.concat([track_df, temp], ignore_index=True)
###Output
100%|█████████████████████████████████████████████████████████████| 6/6 [00:06<00:00, 1.03s/it]
###Markdown
Check the track data that was retrieved.
###Code
track_df.head()
###Output
_____no_output_____
###Markdown
Now, let's check how many songs are in track_df.
###Code
len(track_df)
###Output
_____no_output_____
###Markdown
Next, we will add Spotify's valence index for each track.
###Code
valence=[sp.audio_features(track_df['uri'][i])[0]['valence'] for i in tqdm(range(len(track_df)))]
track_df['valence']=valence
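# Faster alternative (a hedged sketch): spotipy's audio_features() accepts up to 100
# track URIs per call, so the valence values could also be fetched in batches.
# get_valence_batched is an illustrative helper, not part of the original notebook.
def get_valence_batched(sp, uris, batch_size=100):
    features = []
    for start in range(0, len(uris), batch_size):
        features.extend(sp.audio_features(uris[start:start+batch_size]))
    return [f['valence'] for f in features]
# e.g. track_df['valence'] = get_valence_batched(sp, track_df['uri'].tolist())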
track_df
###Output
_____no_output_____
###Markdown
Let's look at the 5 songs with the lowest valence.
###Code
new_track_df = track_df.sort_values('valence')
new_track_df.head()
new_track_df['preview_url'][31]
track_df['valence'].plot.box()
plt.figure(figsize=(10,10));
track_df['valence'].plot.hist()
###Output
_____no_output_____
###Markdown
We continue by retrieving the lyrics data. Getting Lyrics Data From A-Z Lyrics To strengthen the analysis, we will use lyrics data from A-Z Lyrics.
###Code
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import string
import time
import random
exclude = set(string.punctuation)
artist_lyrics = []
track_name = track_df['name'][0]
track_name = track_name.replace(" ","")
track_name = ''.join(ch for ch in track_name if ch not in exclude)
track_name = track_name.lower()
section_url = "http://www.azlyrics.com/lyrics/" + "national/" + track_name + ".html"
print(section_url)
"""
for i in tqdm(range(len(track_df))):
track_name = track_df['name'][i]
track_name = track_name.replace(" ","")
track_name = ''.join(ch for ch in track_name if ch not in exclude)
track_name = track_name.lower()
section_url = "http://www.azlyrics.com/lyrics/" + "national/" + track_name + ".html"
html = urlopen(section_url).read()
soup = BeautifulSoup(html,"lxml")
lyrics_regex = re.sub("(\n|\r)","",str(soup))
lyrics = re.search("<!-- Usage of azlyrics.com content by any third-party lyrics provider is prohibited by our licensing agreement. Sorry about that. -->(.*)<!-- MxM banner -->",
lyrics_regex)
lyrics_1 = lyrics.group(1)
lyrics_1 = re.sub("\s{2,}"," ",lyrics_1)
lyrics_1 = re.sub("<(.[^>]*)>"," ",lyrics_1)
lyrics_1 = re.sub("(\[4x]|\[X4])","",lyrics_1)
lyrics_1 = re.sub("\s{2,}"," ",lyrics_1)
lyrics_1 = lyrics_1.lower()
exclude = set(string.punctuation)
lyrics_1 = ''.join(ch for ch in lyrics_1 if ch not in exclude)
artist_lyrics.append(lyrics_1)
time.sleep(10 + 5 * random.random())
"""
###Output
_____no_output_____
###Markdown
Alternative
###Code
artist_lyrics = pd.read_csv("lyrics.csv",header=0)
track_df['lyrics']=artist_lyrics
track_df.head()
###Output
_____no_output_____
###Markdown
Analysis Percentage of Sad Words And Total Words
###Code
from nltk.tokenize import word_tokenize
from collections import Counter
sad_words = pd.read_csv("sad_words.csv")
sad_words.head()
pct_sad = []
word_count = []
for i in tqdm(range(len(track_df))):
pct_sad_temp = 0
word_count_temp = 0
sad_word_count = 0
lyrics = track_df['lyrics'][i]
word_token = word_tokenize(lyrics)
word_count_temp = len(word_token)
counts = Counter(word_token)
for j in range(len(sad_words)):
if sad_words['word'][j] in counts.keys():
sad_word_count += counts.get(sad_words['word'][j])
pct_sad_temp = sad_word_count / word_count_temp
pct_sad.append(pct_sad_temp)
word_count.append(word_count_temp)
track_df['pct_sad'] = pct_sad
track_df['word_count']= word_count
track_df.head()
track_df.sort_values('pct_sad',ascending=False)
###Output
_____no_output_____
###Markdown
Gloom Index
###Code
gloom_index = []
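# Gloom index per track: average of the musical negativity (1 - valence) and the
# sad-word share scaled up by lyrical density (words per second of the track).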
for i in tqdm(range(len(track_df))):
gloom_index_temp = 0
lyrical_dens = track_df['word_count'][i] / track_df['duration_ms'][i] * 1000
valence = track_df['valence'][i]
pct_sad = track_df['pct_sad'][i]
gloom_index_temp = ((1-valence)+pct_sad*(1+lyrical_dens))/2
gloom_index.append(gloom_index_temp)
max(gloom_index)
gloom_index_new = [(((gloom - min(gloom_index)) * (100 - 0)) / (max(gloom_index) - min(gloom_index))) + 0 for gloom in gloom_index]
gloom_index_new.index(100)
track_df['name'][44]
track_df['gloom_index']=gloom_index_new
new_track_df = track_df.sort_values('gloom_index', ascending=False)
new_track_df = new_track_df.reset_index()
new_track_df
plt.figure(figsize=(10,10));
new_track_df['gloom_index'].plot.density()
plt.figure()
new_track_df.plot.scatter(y='valence', x='duration_ms', c=new_track_df['gloom_index']*4, s=100, figsize=(10,10), legend=True);
from pandas.plotting import radviz
plt.figure(figsize=(10,10))
radviz(new_track_df[['valence','pct_sad', 'duration_ms','album_name']], 'album_name')
###Output
_____no_output_____ |
examples/notebooks/01-active-learning-for-text-classification-with-small-text-intro.ipynb | ###Markdown
Active Learning for Text Classification using Small-TextThis tutorial shows how to use [small-text](https://github.com/webis-de/small-text) to perform active learning for text classification using state-of-the-art transformer models. InstallationBesides small-text, we also install [datasets](https://github.com/huggingface/datasets) to load an example dataset and [matplotlib](https://matplotlib.org/) to plot the learning curves at the end.
###Code
%pip install small-text[transformers] # use "small-text" without "[transformers]" if you want to work on the CPU only
%pip install datasets
%pip install matplotlib
###Output
Requirement already satisfied: small-text[transformers] in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (1.0.0a7)
Requirement already satisfied: numpy>=1.20.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (1.21.4)
Requirement already satisfied: dill in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (0.3.4)
Requirement already satisfied: tqdm in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (4.62.3)
Requirement already satisfied: scipy in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (1.7.2)
Requirement already satisfied: scikit-learn>=0.24.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (1.0.1)
Requirement already satisfied: torchtext>=0.7.0; extra == "transformers" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (0.11.0)
Requirement already satisfied: transformers>=4.0.0; extra == "transformers" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (4.12.3)
Requirement already satisfied: torch>=1.6.0; extra == "transformers" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from small-text[transformers]) (1.10.0)
Requirement already satisfied: threadpoolctl>=2.0.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from scikit-learn>=0.24.1->small-text[transformers]) (3.0.0)
Requirement already satisfied: joblib>=0.11 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from scikit-learn>=0.24.1->small-text[transformers]) (1.1.0)
Requirement already satisfied: requests in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from torchtext>=0.7.0; extra == "transformers"->small-text[transformers]) (2.26.0)
Requirement already satisfied: tokenizers<0.11,>=0.10.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (0.10.3)
Requirement already satisfied: filelock in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (3.3.2)
Requirement already satisfied: pyyaml>=5.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (6.0)
Requirement already satisfied: huggingface-hub<1.0,>=0.1.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (0.1.2)
Requirement already satisfied: sacremoses in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (0.0.46)
Requirement already satisfied: regex!=2019.12.17 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (2021.11.10)
Requirement already satisfied: packaging>=20.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (21.2)
Requirement already satisfied: typing-extensions in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from torch>=1.6.0; extra == "transformers"->small-text[transformers]) (3.10.0.2)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests->torchtext>=0.7.0; extra == "transformers"->small-text[transformers]) (1.26.7)
Requirement already satisfied: charset-normalizer~=2.0.0; python_version >= "3" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests->torchtext>=0.7.0; extra == "transformers"->small-text[transformers]) (2.0.7)
Requirement already satisfied: idna<4,>=2.5; python_version >= "3" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests->torchtext>=0.7.0; extra == "transformers"->small-text[transformers]) (3.3)
Requirement already satisfied: certifi>=2017.4.17 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests->torchtext>=0.7.0; extra == "transformers"->small-text[transformers]) (2021.10.8)
Requirement already satisfied: click in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from sacremoses->transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (8.0.3)
Requirement already satisfied: six in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from sacremoses->transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (1.16.0)
Requirement already satisfied: pyparsing<3,>=2.0.2 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from packaging>=20.0->transformers>=4.0.0; extra == "transformers"->small-text[transformers]) (2.4.7)
Note: you may need to restart the kernel to use updated packages.
Requirement already satisfied: datasets in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (1.15.1)
Requirement already satisfied: numpy>=1.17 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (1.21.4)
Requirement already satisfied: requests>=2.19.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (2.26.0)
Requirement already satisfied: pyarrow!=4.0.0,>=1.0.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (6.0.0)
Requirement already satisfied: multiprocess in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (0.70.12.2)
Requirement already satisfied: dill in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (0.3.4)
Requirement already satisfied: fsspec[http]>=2021.05.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (2021.11.0)
Requirement already satisfied: packaging in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (21.2)
Requirement already satisfied: xxhash in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (2.0.2)
Requirement already satisfied: aiohttp in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (3.8.0)
Requirement already satisfied: pandas in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (1.3.4)
Requirement already satisfied: tqdm>=4.62.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (4.62.3)
Requirement already satisfied: huggingface-hub<1.0.0,>=0.1.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from datasets) (0.1.2)
Requirement already satisfied: certifi>=2017.4.17 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2021.10.8)
Requirement already satisfied: charset-normalizer~=2.0.0; python_version >= "3" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2.0.7)
Requirement already satisfied: idna<4,>=2.5; python_version >= "3" in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (3.3)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (1.26.7)
Requirement already satisfied: pyparsing<3,>=2.0.2 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from packaging->datasets) (2.4.7)
Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from aiohttp->datasets) (4.0.1)
Requirement already satisfied: multidict<7.0,>=4.5 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from aiohttp->datasets) (5.2.0)
Requirement already satisfied: frozenlist>=1.1.1 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from aiohttp->datasets) (1.2.0)
Requirement already satisfied: aiosignal>=1.1.2 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from aiohttp->datasets) (1.2.0)
Requirement already satisfied: attrs>=17.3.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from aiohttp->datasets) (21.2.0)
Requirement already satisfied: yarl<2.0,>=1.0 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from aiohttp->datasets) (1.7.2)
Requirement already satisfied: python-dateutil>=2.7.3 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from pandas->datasets) (2.8.2)
Requirement already satisfied: pytz>=2017.3 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from pandas->datasets) (2021.3)
Requirement already satisfied: typing-extensions>=3.7.4.3 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.10.0.2)
Requirement already satisfied: filelock in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.3.2)
Requirement already satisfied: pyyaml in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (6.0)
Requirement already satisfied: six>=1.5 in /home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages (from python-dateutil>=2.7.3->pandas->datasets) (1.16.0)
###Markdown
Loading the DataFirst, we load the rotten tomatoes dataset. This dataset contains movie review sentences, each labeled by its sentiment as either positive or negative.
###Code
import datasets
import logging
import numpy as np
# disables the progress bar for notebooks: https://github.com/huggingface/datasets/issues/2651
datasets.logging.get_verbosity = lambda: logging.NOTSET
raw_dataset = datasets.load_dataset('rotten_tomatoes')
num_classes = np.unique(raw_dataset['train']['label']).shape[0]
print('First 10 training samples:\n')
for i in range(10):
print(raw_dataset['train']['label'][i], ' ', raw_dataset['train']['text'][i])
###Output
Using custom data configuration default
Reusing dataset rotten_tomatoes_movie_review (/home/cschroeder/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/e06abb624abab47e1a64608fdfe65a913f5a68c66118408032644a3285208fb5)
###Markdown
Preparing the DataNext, we have to convert this raw text data into a format usable by small-text. Since the transformer-based classification in small-text uses huggingface transformers, this step is pretty similar to the preprocessing you may know from transformers, with the addition that the end result must be a `TransformersDataset`. In this example, we use `bert-base-uncased` as the transformer model.
###Code
import transformers
from transformers import AutoTokenizer
transformers.logging.get_verbosity = lambda: logging.NOTSET
transformer_model_name = 'bert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(
transformer_model_name
)
###Output
_____no_output_____
###Markdown
We define a small helper function `get_transformers_dataset()` which delegates to `tokenizer.encode_plus()` and finally builds a `TransformersDataset` instance.
###Code
from small_text.integrations.transformers.datasets import TransformersDataset
def get_transformers_dataset(tokenizer, data, labels, max_length=60):
data_out = []
for i, doc in enumerate(data):
encoded_dict = tokenizer.encode_plus(
doc,
add_special_tokens=True,
padding='max_length',
max_length=max_length,
return_attention_mask=True,
return_tensors='pt',
truncation='longest_first'
)
data_out.append((encoded_dict['input_ids'], encoded_dict['attention_mask'], labels[i]))
return TransformersDataset(data_out)
train = get_transformers_dataset(tokenizer, raw_dataset['train']['text'], raw_dataset['train']['label'])
test = get_transformers_dataset(tokenizer, raw_dataset['test']['text'], raw_dataset['test']['label'])
###Output
_____no_output_____
###Markdown
Setting up the Active LearnerHere, we construct a `PoolBasedActiveLearner` instance, which requires a classifier factory, a query strategy, and the train dataset. To obtain a first model, we initialize the active learner by providing the true labels for 10 sentences. This corresponds to an initial labeling in the real-world setting.
###Code
from small_text.active_learner import PoolBasedActiveLearner
from small_text.initialization import random_initialization_balanced
from small_text.integrations.transformers import TransformerModelArguments
from small_text.integrations.transformers.classifiers.factories import TransformerBasedClassificationFactory
from small_text.query_strategies import PredictionEntropy
from small_text.integrations.transformers import TransformerModelArguments
# simulates an initial labeling to warm-start the active learning process
def initialize_active_learner(active_learner, y_train):
x_indices_initial = random_initialization_balanced(y_train, n_samples=10)
y_initial = y_train[x_indices_initial]
active_learner.initialize_data(x_indices_initial, y_initial)
return x_indices_initial
transformer_model = TransformerModelArguments(transformer_model_name)
clf_factory = TransformerBasedClassificationFactory(transformer_model,
num_classes,
kwargs=dict({'device': 'cuda',
'mini_batch_size': 32,
'early_stopping_no_improvement': -1
}))
query_strategy = PredictionEntropy()
active_learner = PoolBasedActiveLearner(clf_factory, query_strategy, train)
labeled_indices = initialize_active_learner(active_learner, train.y)
###Output
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.seq_relationship.weight']
- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
WARNING:small_text.integrations.transformers.classifiers.classification:Overridering scheduler since optimizer in kwargs needs to be passed in combination with scheduler
/home/cschroeder/.local/share/virtualenvs/notebooks-R6UeU-dP/lib/python3.8/site-packages/small_text/integrations/pytorch/utils/data.py:35: FutureWarning: The input object of type 'Tensor' is an array-like implementing one of the corresponding protocols (`__array__`, `__array_interface__` or `__array_struct__`); but not a sequence (or 0-D). In the future, this object will be coerced as if it was first converted using `np.array(obj)`. To retain the old behaviour, you have to either modify the type 'Tensor', or assign to an empty array created with `np.empty(correct_shape, dtype=object)`.
data_set = np.array(data_set, dtype=object, copy=False)
###Markdown
Active Learning Loop
The main active learning loop queries the unlabeled pool and thereby decides which documents are labeled next. We then provide the labels for those documents, and the active learner retrains the model. After each query, we evaluate the current model against the test set and save the result.

Note: This is active learning as it is done in a scientific simulation. In reality, the label feedback would have been given by human annotators.
###Code
from sklearn.metrics import accuracy_score
num_queries = 10
def evaluate(active_learner, train, test):
y_pred = active_learner.classifier.predict(train)
y_pred_test = active_learner.classifier.predict(test)
test_acc = accuracy_score(y_pred_test, test.y)
print('Train accuracy: {:.2f}'.format(accuracy_score(y_pred, train.y)))
print('Test accuracy: {:.2f}'.format(test_acc))
return test_acc
results = []
results.append(evaluate(active_learner, train[labeled_indices], test))
for i in range(num_queries):
# ...where each iteration consists of labelling 20 samples
q_indices = active_learner.query(num_samples=20)
# Simulate user interaction here. Replace this for real-world usage.
y = train.y[q_indices]
# Return the labels for the current query to the active learner.
active_learner.update(y)
labeled_indices = np.concatenate([q_indices, labeled_indices])
print('Iteration #{:d} ({} samples)'.format(i, len(labeled_indices)))
results.append(evaluate(active_learner, train[labeled_indices], test))
###Output
Train accuracy: 0.90
Test accuracy: 0.48
###Markdown
Plotting the Results
With the previously saved results, we can plot a [learning curve](https://en.wikipedia.org/wiki/Learning_curve_(machine_learning)) to visualize the performance.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 8))
ax = plt.axes()
ax.plot(np.arange(num_queries+1), results)
###Output
_____no_output_____ |
docs/notebooks/01_converting_propositional_datasets.ipynb | ###Markdown
Converting machine learning benchmark datasets[](https://colab.research.google.com/github/srlearn/relational-datasets/blob/main/docs/notebooks/01_converting_propositional_datasets.ipynb)[](https://mybinder.org/v2/gh/srlearn/relational-datasets/HEAD?filepath=docs%2Fnotebooks%2F01_converting_propositional_datasets.ipynb)[Alexander L. Hayes](https://hayesall.com): *Ph.D. Student, Indiana University*.**Abstract**: Most benchmark machine learning datasets have a *vector-based representation*, where we have a single type of object (people, images, houses) and we learn an *attribute* of those objects (disease risk, cat/dog, median price). This tutorial bridges the gap between vector-based machine learning and relational machine learning, and shows how to view the former in terms of the latter.Examples in this notebook are provided as documentation, and are available under the terms of the Apache 2.0 License.
###Code
!pip install numpy relational-datasets
from relational_datasets.convert import from_numpy
import numpy as np
###Output
_____no_output_____
###Markdown
Binary Classification
We're in a binary classification setting when the target array `y` contains 0/1 integers.
###Code
train, modes = from_numpy(
np.array([[0, 1, 1], [0, 1, 2], [1, 2, 2]]),
np.array([0, 0, 1]),
)
train.pos
train.neg
###Output
_____no_output_____
###Markdown
Here we are learning from a collection of **one type of object**. Since there is only one type of object, we can enumerate the objects with an `id`. The *positive examples* show that the object with `id3` is a positive instance of a class, and the *negative examples* show that objects `id1` and `id2` are not instances of this class.
###Code
train.facts
###Output
_____no_output_____
###Markdown
*Modes* are a type of *background knowledge* that show up in the fields of *Inductive Logic Programming* and *Statistical Relational Learning*. A full discussion of them is not feasible here, but briefly: modes provide (1) *type information* and help (2) *constrain the search space* during learning.

> *Alexander did write a [slightly longer discussion about modes](https://hayesall.com/publications/construction-background-knowledge/) to accompany a Knowledge Capture article.*
>
> ILP/SRL can also be highly sensitive to this type of background knowledge. Andrew Cropper, Sebastijan Dumančić, and Stephen H. Muggleton include a more general treatment of refining and learning background knowledge in [their 2020 IJCAI article](https://www.ijcai.org/proceedings/2020/0673.pdf).

Modes can be set automatically in the propositional setting. The ones below say: "When learning about a binary attribute `v4`, we will bind the `id` of an object to specific instances (`id1`, `id2`, `id3`), and then learn about it with respect to specific values (``) of its attributes `v1`, `v2`, and `v3`."
###Code
modes
###Output
_____no_output_____
###Markdown
Regression
When `y` contains floating point numbers, we're in a regression setting.
###Code
train, modes = from_numpy(
np.array([[0, 1, 1], [0, 1, 2], [1, 2, 2]]),
np.array([1.1, 0.9, 2.5]),
)
###Output
_____no_output_____
###Markdown
We represent this by marking all objects as "positive examples," but we want to learn about a *continuous value.*
###Code
train.pos
train.neg
train.facts
###Output
_____no_output_____
###Markdown
*Side Note*: Naming Variables
From the previous examples, we saw that names for the variables and targets were automatically assigned (with the last value `v4` being the target). The `from_numpy` function returns a tuple containing a `RelationalDataset` and a list of strings containing the modes. If an additional list of strings is passed, then those names are used when converting the arrays.

Here we invent a dataset where each `id` represents a person, and we want to learn about their risk for a condition based on their age, BMI, and coronary artery calcification (cac) levels.
###Code
X = np.array([[1, 1, 2], [1, 1, 0], [0, 1, 0], [1, 1, 1], [0, 1, 1]])
y = np.array([0, 0, 1, 1, 0])
data, modes = from_numpy(
X,
y,
["age", "bmi", "cac", "highrisk"],
)
data.pos
data.neg
data.facts
modes
###Output
_____no_output_____
###Markdown
Worked example with scikit-learn's `load_breast_cancer`
[`load_breast_cancer`](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer.html) is based on the Breast Cancer Wisconsin dataset. Here we: (**1**) load the data and class labels, (**2**) split into training and test sets, (**3**) bin the continuous features to discrete, and (**4**) convert to the relational format.
###Code
!pip install scikit-learn
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer
###Output
_____no_output_____
###Markdown
(1) Load the data, target, and variable names
Invoking `load_breast_cancer` returns a dictionary-like object with keys for `.data`, `.target`, `.feature_names`, and `.target_names`. We'll use these to pull out our `X` matrix, `y` array, and variable names.
###Code
breast_cancer = load_breast_cancer()
bc_X = breast_cancer.data
bc_y = breast_cancer.target
variable_names = [name.replace(" ", "") for name in breast_cancer.feature_names.tolist()] + [breast_cancer.target_names[1]]
bc_X
bc_y
variable_names
###Output
_____no_output_____
###Markdown
(2) Split out training and test sets
###Code
X_train, X_test, y_train, y_test = train_test_split(bc_X, bc_y)
###Output
_____no_output_____
###Markdown
(3) Discretize continuous features
scikit-learn's [`KBinsDiscretizer`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KBinsDiscretizer.html) will help us here, but we'll want an ordinal (0, 1, 2, 3, 4) encoding for our discrete features rather than the default one-hot encoding, and we need to ensure that the resulting matrices are converted back to integers.
###Code
disc = KBinsDiscretizer(n_bins=5, encode="ordinal")
X_train = disc.fit_transform(X_train)
X_test = disc.transform(X_test)
X_train = X_train.astype(int)
X_test = X_test.astype(int)
X_train
###Output
_____no_output_____
###Markdown
(4) Convert arrays to `RelationalDataset`
Finally, let's convert our training and test folds into `RelationalDatasets` and `modes`:
###Code
bc_train, bc_modes = from_numpy(X_train, y_train, names=variable_names)
bc_test, _ = from_numpy(X_test, y_test, names=variable_names)
bc_modes
###Output
_____no_output_____ |
Coursera Stanford ML Python wiki.ipynb | ###Markdown
Python tutorial This tutorial loosely follows the topics covered in the Octave tutorial in week 2 of the course The modules needed to run this tutorial are imported below
###Code
%matplotlib inline
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import scipy.io
import scipy.misc
###Output
_____no_output_____
###Markdown
Elementary arithmetic operations Python is capable of working like a calculator with some caveats.
###Code
5+6
3-2
5*8
###Output
_____no_output_____
###Markdown
Beware: integer division rounds the result down! You can implicitly convert to a float by adding a '.'
###Code
1/2
1./2
###Output
_____no_output_____
###Markdown
Exponents use the '**' operator
###Code
2**6
###Output
_____no_output_____
###Markdown
Logical operations
Every object has a boolean value returned from bool(). The following elements are false:
* None
* False
* 0
* Empty collections: “”, (), [], {}
###Code
1 and 0 # AND
1 or 0 # OR
1 != 0 # XOR
bool([]) and True # False
a='foo'
b='bar'
bool(a) != bool(b)
b=None
bool(a) != bool(b)
###Output
_____no_output_____
###Markdown
Python variables and types Displaying variables Variables are displayed on the console by typing the variable name
###Code
b=3
b
from math import pi
b=pi
b
###Output
_____no_output_____
###Markdown
Floating point numbers are formatted in two ways. The 'old' way (pre-Python 2.7):
###Code
print '%1.4f'%b
###Output
3.1416
###Markdown
The 'new' way (python 2.7+):
###Code
print '{:1.5}'.format(b)
###Output
3.1416
###Markdown
Numpy basics Vectors and matrices
###Code
a=np.array([[1,2],[3,4],[5,6]]) # 3x2 numpy matrix
a
v=[1,2,3] # ordinary python list
v
v=np.array([1,2,3]) # numpy array
v
###Output
_____no_output_____
###Markdown
Use `np.arange(start, stop, increment)` to generate a sequence of floats in a numpy array
###Code
v=np.arange(1,2,0.1)
v
###Output
_____no_output_____
###Markdown
Use `tolist()` to convert a numpy array to a python list
###Code
v.tolist()
###Output
_____no_output_____
###Markdown
The `range()` built-in function generates integer sequences in a `list`
###Code
v=range(1,6)
v
###Output
_____no_output_____
###Markdown
numpy's `linspace` function generates a non-integer sequence with a specific number of elements
###Code
v=np.linspace(1,2,11)
v
###Output
_____no_output_____
###Markdown
Comprehensions list comprehensions List comprehensions allow you to create iterative code without using a loop
###Code
v=[1,2,3]
[e**2 for e in v]
[e**2 for e in v if e%2 !=0]
[e**2 if e%2 != 0 else -1 for e in v]
###Output
_____no_output_____
###Markdown
dictionary comprehensions Dictionary comprehensions allow you to generate dictionaries without a loop
###Code
d = {'a':1, 'b':2, 'c':3}
{v: k for k, v in d.items()} # swap keys and values
{1: 'a', 2: 'b', 3: 'c'}
###Output
_____no_output_____
###Markdown
set comprehension Set comprehensions generate sets in a similar way
###Code
{x**2 for x in [1, 1, 2]}
set([1, 4])
###Output
_____no_output_____
###Markdown
Special matrix functions
###Code
ones=np.ones((3,2))
ones
3*ones
np.zeros((3,2))
###Output
_____no_output_____
###Markdown
Generate an array of uniform random numbers
###Code
np.random.rand(3,2)
###Output
_____no_output_____
###Markdown
Generate an array of normal random numbers
###Code
np.random.randn(3,2)
id=np.eye(3)
id
3*id
###Output
_____no_output_____
###Markdown
Moving data around shape and size of a matrix
###Code
a=np.random.rand(3,2)
a
a.shape
a.size
###Output
_____no_output_____
###Markdown
Loading files in python Reading the contents of a simple text file
###Code
file=open('ex6/emailSample1.txt', 'r')
file_contents=file.read()
file_contents
###Output
_____no_output_____
###Markdown
Loading image files
###Code
data = scipy.misc.imread('ex7/bird_small.png')
plt.imshow(data)
###Output
_____no_output_____
###Markdown
Loading the contents of a csv file
###Code
data = np.loadtxt('ex0.csv', delimiter=',')
data
###Output
_____no_output_____
###Markdown
Loading a Matlab formatted file
###Code
data = scipy.io.loadmat('ex3/ex3data1.mat')
data
###Output
_____no_output_____
###Markdown
Manipulating matrices Indexing and Slicing `a[start:end]` - items start through end-1 `a[start:]` - items start through the rest of the array `a[:end]` - items from the beginning through end-1 `a[:]` - a copy of the whole array There is also the step value, which can be used with any of the above: `a[start:end:step]` - start through not past end, by step
###Code
x = np.arange(10)
x
x[:]
x[1:]
x[:5]
x[2]
x[1:7:2]
###Output
_____no_output_____
###Markdown
Negative indices `a[-1]` - last item in the array `a[-2:]` - last two items in the array `a[:-2]` - everything except the last two items
###Code
x[:-2]
###Output
_____no_output_____
###Markdown
2d matrices are accessed in the row, column order
###Code
arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
arr2d
arr2d[2]
arr2d[0]
arr2d[0,1]
###Output
_____no_output_____
###Markdown
Boolean indexing Index selection can be done by filtering elements with boolean values
###Code
mat = np.array(['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']).reshape((3,3))
mat
rand = np.random.randn(3,3)>0
rand
mat[rand]
###Output
_____no_output_____
###Markdown
Flattening Reshaping an array from a higher-dimensional to a one-dimensional layout is called flattening
###Code
arr = np.arange(9).reshape((3,3))
arr
###Output
_____no_output_____
###Markdown
The `flatten()` function returns a copy of the array
###Code
arr.flatten()
###Output
_____no_output_____
###Markdown
flattening can be done columnwise
###Code
arr.flatten(1)
###Output
_____no_output_____
###Markdown
the `ravel()` function doesn't return a copy of the underlying data
###Code
arr.ravel()
###Output
_____no_output_____
###Markdown
Vector assignments Python doesn't create copies of underlying data on assignment statements
###Code
arr = np.arange(10)
arr
###Output
_____no_output_____
###Markdown
create a reference to some elements in the array and reassign them
###Code
slice=arr[4:8]
slice
slice[:]=-5
slice
slice[1]=50
slice
arr
###Output
_____no_output_____
###Markdown
now create a copy of the array explicitly and reassign
###Code
arr_copy=arr.copy()
arr_copy
arr_copy[4:8]=20
arr_copy
###Output
_____no_output_____
###Markdown
The original array is unchanged
###Code
arr
###Output
_____no_output_____
###Markdown
Horizontal and vertical concatenation There are two ways to concatenate
###Code
mat = np.array(['The', 'quick', 'brown', 'fox'])
mat2 = np.array(['jumped', 'over', 'the', 'lazy'])
###Output
_____no_output_____
###Markdown
Method 1: Use stacking
###Code
np.hstack((mat,mat2))
np.vstack((mat,mat2))
np.column_stack((mat,mat2))
###Output
_____no_output_____
###Markdown
Method 2: Use the `concatenate()` function applied to an axis
###Code
arr = np.arange(12).reshape((3, 4))
arr
np.concatenate((arr,arr), axis=1)
np.concatenate((arr,arr), axis=0)
arr = np.arange(5)
np.concatenate((arr,arr), axis=0)
###Output
_____no_output_____
###Markdown
Matrix multiplication
###Code
x=np.array([[1,2,3], [4,5,6], [7,8,9]])
y=np.array([[1,2,3], [4,5,6], [7,8,9]])
np.dot(x,y)
###Output
_____no_output_____
###Markdown
Matrix multiplication is done using the `dot()` function
###Code
x.dot(y)
###Output
_____no_output_____
###Markdown
Element-wise multiplication using the '*' operator
###Code
x*y
###Output
_____no_output_____
###Markdown
Element-wise squaring
###Code
x**2
###Output
_____no_output_____
###Markdown
Element-wise reciprocal
###Code
1./x
###Output
_____no_output_____
###Markdown
Element-wise logarithms/exponents
###Code
np.log(x)
np.exp(x)
###Output
_____no_output_____
###Markdown
Element-wise addition
###Code
1+x
###Output
_____no_output_____
###Markdown
Transpose of a matrix
###Code
x.T
###Output
_____no_output_____
###Markdown
Maximum and minimum of matrix values
###Code
np.max(x)
np.min(x)
###Output
_____no_output_____
###Markdown
Sum and product of all elements
###Code
np.sum(x)
np.sum(x,axis=0)
np.sum(x,axis=1)
np.sum(x)
np.product(x)
np.product(x,axis=0)
np.product(x,axis=1)
###Output
_____no_output_____
###Markdown
Inverse and pseudo-inverse of a matrix
###Code
x=2*np.eye(3)
np.linalg.inv(x)
np.linalg.pinv(x)
###Output
_____no_output_____
###Markdown
Plotting data with matplotlib Creating/clearing figures
Plots reside within figures
###Code
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(np.random.randn(500), np.random.randn(500), np.random.randn(500), marker='o')
###Output
_____no_output_____
###Markdown
Subplots
###Code
fig, axes = plt.subplots(2,2, sharex=True, sharey=True)
color = (e for e in ['r', 'g', 'k', 'b'])
for i in range(2):
for j in range(2):
axes[i, j].hist(np.random.randn(500), bins=50, color=color.next(), alpha=0.5)
## Line color, labels, title and legend
fig, axes = plt.subplots(2,2)
axes[0,0].plot(randn(50).cumsum(), 'k--')
axes[0,1].hist(randn(100), bins=20, color='r', alpha=0.3)
axes[1,1].scatter(np.arange(30), np.arange(30) + 3 * randn(30), np.arange(30))
axes[1,0].plot(randn(1000).cumsum())
###Output
_____no_output_____
###Markdown
Control statements For loops
###Code
li = ['a', 'b', 'e']
for e in li:
print e
d = enumerate(li)
for k,v in d:
print k,v
###Output
0 a
1 b
2 e
###Markdown
While loops
###Code
count = 0
while (count <= 3):
print 'The count is:', count
count += 1
###Output
The count is: 0
The count is: 1
The count is: 2
The count is: 3
###Markdown
break statement
###Code
for n in range(2, 10):
    for x in range(2, n):
        if n % x == 0:
            print n, 'equals', x, '*', n/x
            break
    else:
        # the inner loop fell through without finding a factor, so n is prime
        print n, 'is a prime number'
###Output
2 is a prime number
3 is a prime number
4 equals 2 * 2
5 is a prime number
6 equals 2 * 3
7 is a prime number
8 equals 2 * 4
9 equals 3 * 3
###Markdown
if-elif-else statement
###Code
var = 100
if var == 200:
print "1 - Got a true expression value"
print var
elif var == 150:
print "2 - Got a true expression value"
print var
elif var == 100:
print "3 - Got a true expression value"
print var
else:
print "4 - Got a false expression value"
print var
###Output
3 - Got a true expression value
100
|
src/old_exams/exam/Cultural Diffusion v2.ipynb | ###Markdown
Conceptual description
The basic idea is a grid in which every grid cell is an agent. An agent interacts with a randomly chosen neighbor (chosen proportionally to similarity) and then adopts one cultural trait from that neighbor. The original model is by Axelrod (see the attached paper). There is by now an extensive literature on it (see the Flache paper for a recent review). Traits are discrete, but they are implemented here via numpy binary arrays.
###Code
import collections
import random
import numpy as np
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import SingleGrid
from mesa.datacollection import DataCollector
class CulturalDiff(Model):
"""
    Model class for the Axelrod cultural diffusion model.
Parameters
----------
height : int
height of grid
width : int
height of grid
seed : int
random seed
Attributes
----------
height : int
width : int
schedule : RandomActivation instance
grid : SingleGrid instance
"""
def __init__(self, height=20, width=20, seed=None):
super().__init__(seed=seed)
self.height = height
self.width = width
self.schedule = RandomActivation(self)
self.grid = SingleGrid(width, height, torus=True)
self.datacollector = DataCollector(model_reporters={'diversity':calculate_nr_of_cultures})
# Fill grid with agents with random traits
# Note that this implementation does not guarantee some set distribution of traits.
# Therefore, examining the effect of minorities etc is not facilitated.
for cell in self.grid.coord_iter():
profile = np.asarray([self.random.choice([0,1]) for _ in range(4)])
agent = CulturalDiffAgent(cell, self, profile)
self.grid.position_agent(agent, cell)
self.schedule.add(agent)
def step(self):
"""
Run one step of the model.
"""
self.datacollector.collect(self)
self.schedule.step()
class CulturalDiffAgent(Agent):
"""
    Cultural diffusion agent
Parameters
----------
pos : tuple of 2 ints
the x,y coordinates in the grid
model : Model instance
profile : ndarray
"""
def __init__(self, pos, model, profile):
super().__init__(pos, model)
self.pos = pos
self.profile = profile
def step(self):
#For each neighbor, calculate the similarity
neighbor_similarity_dict = {}
for neighbor in self.model.grid.neighbor_iter(self.pos, moore=True):
neighbor_similarity = np.sum(self.profile==neighbor.profile)
neighbor_similarity_dict[neighbor] = neighbor_similarity
# Proportional to this similarity, pick a 'random' neighbor to interact with
neighbor_to_interact = self.random.choices(list(neighbor_similarity_dict.keys()),
weights=neighbor_similarity_dict.values())[0]
# Select a trait that differs between the selected neighbor and self and change that trait in self
# we are using some numpy boolean indexing to make this short and easy
not_same_features = self.profile != neighbor_to_interact.profile
if np.any(not_same_features):
index_for_trait = self.random.choice(np.nonzero(not_same_features)[0])
self.profile[index_for_trait] = neighbor_to_interact.profile[index_for_trait]
def traits_to_color(profile):
""" Converts the traits of an agent to a list of RGBA values"""
color = profile.copy().astype(float)
if color[-1]==0:
color[-1] = 0.2
return color
def calculate_nr_of_cultures(model):
diversity = collections.defaultdict(int)
for (cell, i, j) in model.grid.coord_iter():
if cell:
diversity[tuple(cell.profile)] += 1
return len(diversity.keys())
###Output
_____no_output_____
###Markdown
Visualization Static images After initialization
###Code
model = CulturalDiff(seed=123456789)
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import pandas as pd
def plot_model(model, ax):
grid = np.zeros((model.height, model.width, 4))
for (cell, i, j) in model.grid.coord_iter():
color = [0,0,0,0] #in case not every cell is filled, the default colour is white
if cell is not None:
color = traits_to_color(cell.profile)
grid[i,j] = color
plt.imshow(grid)
fig, ax = plt.subplots()
plot_model(model, ax)
plt.show()
for _ in range(250):
model.step()
model.datacollector.get_model_vars_dataframe().plot()
###Output
_____no_output_____ |
models/SIR_estimation.ipynb | ###Markdown
Johns Hopkins data
###Code
main_link = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'
CONFIRMED = pd.read_csv(main_link+'time_series_19-covid-Confirmed.csv')
DEATHS = pd.read_csv(main_link+'time_series_19-covid-Deaths.csv')
RECOVERED = pd.read_csv(main_link+'time_series_19-covid-Recovered.csv')
###Output
_____no_output_____
###Markdown
Finnish HS data
###Code
import requests
import json
res = requests.get('https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData')
finland_data = json.loads(res.content)
confirmed_finland = pd.DataFrame(finland_data['confirmed'])
deaths_finland = pd.DataFrame(finland_data['deaths'])
recovered_finland = pd.DataFrame(finland_data['recovered'])
confirmed_finland.date = pd.to_datetime(confirmed_finland.date)
confirmed_finland.date = confirmed_finland.date.dt.date
finland_total = confirmed_finland.groupby(['date'], as_index=False)['id'].count()
finland_total.tail()
finland_total.id.cumsum().tail()
plt.figure(figsize=(10, 7))
plt.plot(finland_total.date, finland_total.id.cumsum())
plt.show()
confirmed_uusima = confirmed_finland[confirmed_finland.healthCareDistrict.isin(['HUS'])]
total_uusima = confirmed_uusima.groupby(['date'], as_index=False)['id'].count()
total_uusima.tail()
total_uusima.id.cumsum().tail()
plt.figure(figsize=(10, 7))
plt.plot(total_uusima.date, total_uusima.id.cumsum())
plt.show()
max_R0 = 4.3
min_R0 = 1.1
dynamic_R0 = expit(np.linspace(-5, 3, num=60))[::-1]
dynamic_R0 = dynamic_R0 * (max_R0 - min_R0) + min_R0
dynamic_R0 = np.concatenate((dynamic_R0, np.repeat(dynamic_R0[-1], 400-len(dynamic_R0))))
plt.figure(figsize=(8, 6))
plt.plot(dynamic_R0[:60], linewidth=2, color='red')
plt.title("Dynamic R0 modelling")
plt.text(0, max_R0, f"R0: {max_R0}")
plt.text(60, min_R0, f"R0: {min_R0}")
plt.vlines(14, min_R0, max_R0, label='14 days (incubation period)', linestyles='dashed')
plt.vlines(30, min_R0, max_R0, label='30 days (quarantine start)', linestyles='dashed')
plt.ylabel("R0")
plt.xlabel("Days from start of infection")
plt.legend()
sns.despine(top=True, right=True, left=False, bottom=False)
# sir = SIR(N=1304851, I0=1/0.05, beta=dynamic_R0 * 0.0576 * 2, gamma=0.0576*2, days=150)
# S, I, R = sir.run()
# sir.plot_results(S, I, R)
# plt.figure(figsize=(10, 7))
# plt.plot(total_uusima.id.cumsum().values, label='actual reported')
# plt.plot(I[:20]/20, label='model')
# plt.title("Uusima region")
# plt.xlabel("Days from start of infection")
# plt.ylabel("Estimated reported number of cases")
# plt.legend()
# plt.grid()
total_uusima.id.cumsum().values
def calculate_dynamic_R0(max_R0, min_R0, reaction_days, simulation_days):
dynamic_R0 = expit(np.linspace(-5, 3, num=reaction_days))[::-1]
dynamic_R0 = dynamic_R0 * (max_R0 - min_R0) + min_R0
dynamic_R0 = np.concatenate((dynamic_R0, np.repeat(dynamic_R0[-1], simulation_days)))
    return dynamic_R0
from sklearn.metrics import mean_squared_log_error, mean_absolute_error, mean_squared_error
def loss(sir_params, actual_data):
max_R0, min_R0, reaction_days, gamma = sir_params
dynamic_R0 = calculate_dynamic_R0(max_R0, min_R0, int(reaction_days), len(actual_data))
sir = SIR(N=1304851, I0=1/0.05, beta=dynamic_R0 * gamma, gamma=gamma, days=len(actual_data))
S, I, R = sir.run()
return mean_absolute_error(I/20, actual_data)#np.mean(np.square(I/10-actual_data))
loss([6, 1, 60, 0.01], total_uusima.id.cumsum().values)
from scipy.optimize import minimize
%%time
max_R0 = 0
min_R0 = 0
reaction_days=0
gamma = 10
res = minimize(
loss,
x0=[max_R0, min_R0, reaction_days, gamma],
args=(total_uusima.id.cumsum().values),
#L-BFGS-B, TNC, SLSQP
method="SLSQP",
bounds=((2.1, 5), (1., 2.1), (15, 100), (0.0576, 0.0576*4))
)
print(res)
import seaborn as sns
max_R0, min_R0, reaction_days, gamma = res.x
print(max_R0, min_R0, reaction_days, gamma)
sir = SIR(
N=1304851, I0=1/0.1, beta=calculate_dynamic_R0(max_R0, min_R0, int(reaction_days), 300)*gamma,
gamma=gamma, days=150)
S, I, R = sir.run()
plt.figure(figsize=(8, 6))
plt.title("SIR model")
plt.plot(S, color='b', label='susceptible')
plt.plot(I, color='r', label='infected')
plt.plot(R, color='g', label='removed')
plt.legend()
plt.xlabel("Days from start of infection")
plt.ylabel("Cases")
sns.despine(top=True, right=True, left=False, bottom=False)
plt.show()
plt.figure(figsize=(8, 6))
plt.plot(total_uusima.id.cumsum().values)
plt.plot(I[:18]/10)
plt.title("Uusima region")
plt.xlabel("Days from start of infection")
plt.ylabel("Estimated reported number of cases")
sns.despine(top=True, right=True, left=False, bottom=False)
plt.grid(alpha=0.5, linestyle='dashed')
###Output
_____no_output_____ |
homework08.ipynb | ###Markdown
Perform a one-way analysis of variance (ANOVA) to determine whether there are differences in mean height among adult soccer players, hockey players, and weightlifters. Heights are given for three groups of randomly selected athletes:
Soccer players: 173, 175, 180, 178, 177, 185, 183, 182.
Hockey players: 177, 179, 180, 188, 177, 172, 171, 184, 180.
Weightlifters: 172, 173, 169, 177, 166, 180, 178, 177, 172, 166, 170.
**The significance level is not specified, so we take alpha = 0.05.**
###Code
import numpy as np
f = np.array([173, 175, 180, 178, 177, 185, 183, 182])
h = np.array([177, 179, 180, 188, 177, 172, 171, 184, 180])
s = np.array([172, 173, 169, 177, 166, 180, 178, 177, 172, 166, 170])
nf = f.size
nh = h.size
ns = s.size
n = nf + nh + ns
print(nf, nh, ns, n)
# three groups
k = 3
###Output
_____no_output_____
###Markdown
Mean height in each group:
###Code
print(f.mean(), h.mean(), s.mean())
###Output
179.125 178.66666666666666 172.72727272727272
###Markdown
Find the overall mean height across all groups:
###Code
all = np.concatenate([f, h, s])
print(all.mean())
###Output
176.46428571428572
###Markdown
Find the total sum of squared deviations of the observations from the overall mean:
###Code
s2 = np.sum((all - all.mean())**2)
s2
###Output
_____no_output_____
###Markdown
Find the sum of squared deviations of the group means from the overall mean (the between-group, or factor, sum of squares):
###Code
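# between-group (factor) sum of squares: n_group * (group mean - overall mean)^2, summed over the groups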
s2f = nf*(f.mean() - all.mean())**2 + ns*(s.mean() - all.mean())**2 + nh*(h.mean() - all.mean())**2
s2f
###Output
_____no_output_____
###Markdown
Find the residual (within-group) sum of squared deviations:
###Code
s2r = np.sum((f - f.mean())**2) + np.sum((h - h.mean())**2) + np.sum((s - s.mean())**2)
s2r
###Output
_____no_output_____
###Markdown
Verify that s2 = s2f + s2r:
###Code
print(s2, s2f + s2r)
###Output
830.9642857142854 830.964285714286
###Markdown
Total variance:
###Code
sigma2g = s2/(n-1)
sigma2g
###Output
_____no_output_____
###Markdown
Factor (between-group) variance:
###Code
sigma2f = s2f / (k-1)
sigma2f
###Output
_____no_output_____
###Markdown
Residual (within-group) variance:
###Code
sigma2r = s2r/(n-k)
sigma2r
###Output
_____no_output_____
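###Markdown
The quantities computed above combine into the standard one-way ANOVA test statistic, which compares the factor (between-group) variance to the residual (within-group) variance:
$$F_H = \frac{\sigma_f^2}{\sigma_r^2} = \frac{S_f^2/(k-1)}{S_r^2/(n-k)}$$
with $k-1$ and $n-k$ degrees of freedom. The observed value is then compared against the critical value of the Fisher-Snedecor distribution at the chosen significance level.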
###Markdown
Compute Fh:
###Code
Fh = sigma2f/sigma2r
Fh
# degrees of freedom:
print(k-1,n-k)
###Output
2 25
###Markdown
Find the value of Fkr in the table of critical points of the Fisher-Snedecor distribution for significance level 0.05:
###Code
Fkr = 3.38
###Output
_____no_output_____
###Markdown
Hypothesis H0: the mean heights of soccer players, hockey players, and weightlifters are the same. \
Hypothesis H1: the mean heights in the three groups differ.
###Code
Fh>Fkr
###Output
_____no_output_____
###Markdown
The empirical correlation ratio:
###Code
eta2= s2f/s2
eta2
###Output
_____no_output_____
###Markdown
The value of the correlation ratio turned out to be very small and disagrees with the conclusion obtained via the Fisher-Snedecor distribution at significance level 0.05. Let's find Fkr for significance level 0.01:
###Code
Fkr = 5.57
Fh > Fkr
###Output
_____no_output_____ |
13_unsupervised_learning/03_clustering_algorithms/03_kmeans_evaluation.ipynb | ###Markdown
K-Means: Evaluating cluster quality Cluster quality metrics help select among alternative clustering results. This notebook illustrates several options, namely inertia and the silhouette scores. Imports & Settings
###Code
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
from time import sleep
import numpy as np
from numpy.random import seed
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
from IPython import display
seed(42)
sns.set_style('white')
cmap = ListedColormap(sns.xkcd_palette(['denim blue',
'medium green',
'pale red']))
cmap = ListedColormap(sns.color_palette('Paired', 10))
###Output
_____no_output_____
###Markdown
2D Cluster Demo
###Code
def sample_clusters(n_points=500,
n_dimensions=2,
n_clusters=5,
cluster_std=1):
return make_blobs(n_samples=n_points,
n_features=n_dimensions,
centers=n_clusters,
cluster_std=cluster_std,
random_state=42)
###Output
_____no_output_____
###Markdown
Evaluate Number of Clusters using Inertia The k-Means objective function suggests we compare the evolution of the inertia or within-cluster variance. Initially, additional centroids decrease the inertia sharply because new clusters improve the overall fit. Once an appropriate number of clusters has been found (assuming it exists), new centroids reduce the within-cluster variance by much less as they tend to split natural groupings. Hence, when k-Means finds a good cluster representation of the data, the inertia tends to follow an elbow-shaped path similar to the explained variance ratio for PCA.
###Code
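# Added minimal sketch: the "inertia" described above is exposed by scikit-learn as the fitted
# estimator's `inertia_` attribute, so an elbow curve is simply inertia plotted against k.
# The demo data below are arbitrary blobs and are not used elsewhere in this notebook.
X_elbow, _ = make_blobs(n_samples=300, centers=4, random_state=0)
for k in range(1, 8):
    print(k, KMeans(n_clusters=k, random_state=0).fit(X_elbow).inertia_)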
def inertia_plot_update(inertias, ax, delay=1):
inertias.plot(color='k',
lw=1,
title='Inertia',
ax=ax,
xlim=(inertias.index[0], inertias.index[-1]),
ylim=(0, inertias.max()))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
display.display(plt.gcf())
display.clear_output(wait=True)
sleep(delay)
def plot_kmeans_result(data, labels, centroids,
assignments, ncluster, Z, ax):
# plot data
ax.scatter(*data.T, c=labels, s=20, cmap=cmap)
# plot cluster centers
ax.scatter(*centroids.T,
marker='o',
c='w',
s=200,
edgecolor='k',
zorder=9)
for i, c in enumerate(centroids):
ax.scatter(*c,
marker=f'${i}$',
s=50,
edgecolor='',
zorder=10)
xy = pd.DataFrame(data[assignments == i],
columns=['x', 'y']).assign(cx=c[0],
cy=c[1])
ax.plot(xy[['x', 'cx']].T,
xy[['y', 'cy']].T,
ls='--',
color='k',
lw=0.5)
# plot voronoi
ax.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=cmap,
aspect='auto',
origin='lower',
alpha=.2)
ax.set_title(f'Number of Clusters: {ncluster}')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Run Elbow Experiment
###Code
n_clusters, max_clusters = 4, 7
cluster_list = list(range(1, max_clusters + 1))
inertias = pd.Series(index=cluster_list)
data, labels = sample_clusters(n_clusters=n_clusters)
x, y = data.T
xx, yy = np.meshgrid(np.arange(x.min() - 1, x.max() + 1, .01),
np.arange(y.min() - 1, y.max() + 1, .01))
fig, axes = plt.subplots(ncols=3, nrows=3, figsize=(16, 9))
axes = np.array(axes).flatten()
# Plot Sample Data
axes[0].scatter(x, y,
c=labels, s=10,
cmap=cmap)
axes[0].set_title('{} Sample Clusters'.format(n_clusters))
for ax in axes:
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
sns.despine();
for c, n_clusters in enumerate(range(1, max_clusters + 1), 2):
kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(data)
centroids, assignments, inertia = kmeans.cluster_centers_, kmeans.labels_, kmeans.inertia_
inertias[n_clusters] = inertia
inertia_plot_update(inertias, axes[1])
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plot_kmeans_result(data, labels, centroids, assignments, n_clusters, Z, axes[c])
fig.tight_layout()
###Output
_____no_output_____
###Markdown
Evaluating the Silhouette Score

The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) provides a more detailed picture of cluster quality. It answers the question: how far are the points in the nearest cluster relative to the points in the assigned cluster? To this end, it compares the mean intra-cluster distance (a) to the mean distance to the nearest cluster (b) and computes the following score s:

$$s=\frac{b-a}{\max(a,b)}\quad\in [-1, 1]$$

The score can vary between -1 and 1, but negative values are unlikely in practice because they imply that the majority of points are assigned to the wrong cluster. A useful visualization of the silhouette score compares the values for each data point to the global average because it highlights the coherence of each cluster relative to the global configuration. The rule of thumb is to avoid clusters with mean scores below the average for all samples.

The following figure shows an excerpt from the silhouette plot for three and four clusters, where the former highlights the poor fit of cluster 1 by sub-par contributions to the global silhouette score, whereas all of the four clusters have some values that exhibit above-average scores.
###Code
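# Added minimal sketch: how the silhouette helpers imported above are called directly.
# The demo data and cluster count are arbitrary and independent of the experiment below.
X_sil, _ = make_blobs(n_samples=300, centers=4, random_state=0)
labels_sil = KMeans(n_clusters=4, random_state=0).fit_predict(X_sil)
print(silhouette_score(X_sil, labels_sil))        # average score over all samples
print(silhouette_samples(X_sil, labels_sil)[:5])  # per-sample scores, as used in the plots below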
def plot_silhouette(values, y_lower, i, n_cluster, ax):
cluster_size = values.shape[0]
y_upper = y_lower + cluster_size
color = plt.cm.viridis(i / n_cluster)
ax.fill_betweenx(np.arange(y_lower, y_upper), 0, values,
facecolor=color, edgecolor=color, alpha=0.7)
ax.text(-0.05, y_lower + 0.5 * cluster_size, str(i))
y_lower = y_upper + 10
return y_lower
def format_silhouette_plot(ax):
ax.set_title("Silhouette Plot")
ax.set_xlabel("Silhouette Coefficient")
ax.set_ylabel("Cluster Label")
ax.axvline(x=silhouette_avg,
color='red',
linestyle='--',
lw=1)
ax.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
def plot_final_assignments(x, y, centroids,
assignments, n_cluster, ax):
c = plt.cm.viridis(assignments / n_cluster)
ax.scatter(x, y, marker='.', s=30,
lw=0, alpha=0.7, c=c, edgecolor='k')
ax.scatter(*centroids.T, marker='o',
c='w', s=200, edgecolor='k')
for i, c in enumerate(centroids):
ax.scatter(*c, marker='${}$'.format(i),
s=50, edgecolor='k')
ax.set_title('{} Clusters'.format(n_cluster))
n_clusters = 4
max_clusters = 7
cluster_list = list(range(1, max_clusters + 1))
inertias = pd.Series(index=cluster_list)
data, labels = sample_clusters(n_clusters=n_clusters)
x, y = data.T
fig, axes = plt.subplots(ncols=2,
nrows=max_clusters,
figsize=(12, 20))
axes[0][0].scatter(x, y, c=labels, s=10, cmap=cmap)
axes[0][0].set_title('Sample Clusters')
for i in range(max_clusters):
for j in [0, 1]:
axes[i][j].axes.get_xaxis().set_visible(False)
axes[i][j].axes.get_yaxis().set_visible(False)
sns.despine()
for row, n_cluster in enumerate(range(2, max_clusters + 1), 1):
kmeans = KMeans(n_clusters=n_cluster,
random_state=42).fit(data)
centroids, assignments, inertia = (kmeans.cluster_centers_,
kmeans.labels_,
kmeans.inertia_)
inertias[n_cluster] = inertia
inertia_plot_update(inertias, axes[0][1])
silhouette_avg = silhouette_score(data, assignments)
silhouette_values = silhouette_samples(data, assignments)
silhouette_plot, cluster_plot = axes[row]
y_lower = 10
for i in range(n_cluster):
y_lower = plot_silhouette(np.sort(silhouette_values[assignments == i]),
y_lower,
i,
n_cluster,
silhouette_plot)
format_silhouette_plot(silhouette_plot)
plot_final_assignments(x, y, centroids, assignments,
n_cluster, cluster_plot)
fig.tight_layout()
fig.suptitle(f'KMeans Silhouette Plot with {n_clusters} Clusters',
fontsize=14)
fig.tight_layout()
fig.subplots_adjust(top=.95)
###Output
_____no_output_____ |
_build/html/_sources/curriculum-notebooks/Mathematics/DataRepresentation/data-representation.ipynb | ###Markdown

###Code
from IPython.display import HTML
hide_me = ''
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show) {
$('div.input').each(function(id) {
el = $(this).find('.cm-variable:first');
if (id == 0 || el.text() == 'hide_me') {
$(this).hide();
}
});
$('div.output_prompt').css('opacity', 0);
} else {
$('div.input').each(function(id) {
$(this).show();
});
$('div.output_prompt').css('opacity', 1);
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input style="opacity:1" type="submit" value="Click here to toggle on/off the raw code."></form>''')
hide_me
from ipywidgets import interact
import ipywidgets as widgets
import IPython
import matplotlib.pyplot as plt
import numpy as np
import math
import plotly as py
import plotly.graph_objs as go
import pylab
from IPython.display import Image, HTML, YouTubeVideo
###Output
_____no_output_____
###Markdown
Data Representation in Graphs

Grade 8 curriculum

Data plays an ever-increasing role in our lives. Like it or not, we are faced with numerical information every day, and we use it to make decisions. Should I be glad that 9 out of 10 dentists recommend my toothpaste? What about the 10th? A new study says that going for a run at 5 a.m. every morning reduces my risk of catching some terrible disease by 15%. Is it worth getting out of bed?

It's often hard to find meaning in data if it's just a bunch of numbers on a page, so we make that easier by using graphs. Graphs take data and turn them into pictures—bars, lines, circles, and more. But not all graphs are created equal; some do their jobs better than others. A good graph is a perfect tool for understanding a problem. A bad graph can be confusing, or in some cases, intentionally misleading.

Graphs are used every day by news media, politicians, and scientists to convey information. Some use them well; some do not. In this notebook, we'll explore good and bad examples of graphs. By working through the examples and exercises in this notebook, you'll learn:
- how to decide which type of graph is best for a given set of data;
- how to identify flawed or misleading graphs;
- how some of those flaws can be corrected; and
- most importantly, how to read a graph and apply its meaning to your everyday life.

*Many of the examples of bad graphs you'll find below are from the media (and one source in particular). This notebook isn't trying to criticize these sources. They just happen to have given us a lot of examples to choose from.*

What makes a good graph?

First and most importantly, a graph should allow a reader, at a glance, to understand the information it's meant to convey. A good graph is like a good movie scene; if it's set up well, you can tell exactly what you're supposed to know. Some basic parts of a successful graph are:
1. A title
2. Proper labels
3. Axes that start at zero (if numerical)
4. Percentages that add to 100%
5. Easy to read
6. Use of colours, *as long as they are meaningful* and not just for show

*By the way: **axes** (ACK-sees) are the reference lines on a graph. They're where you find the names of the categories (usually at the bottom) and the number scale (usually on the left). One of these lines is called an **axis** (ACK-sis).*

For a quick overview of different types of graphs and simple examples, you might find this [Math Is Fun](https://www.mathsisfun.com/data/pictographs.html) article useful. We'll look at some of these kinds of graphs below. You'll notice many of them are eye-catching, and they also convey information really well.

One of the places you'll find a lot of graphs is in political coverage. The media (and many of their readers/viewers) love a good "horse race". For example, this [CBC federal poll tracking article](http://www.cbc.ca/news/politics/poll-tracker-federal-poll-averages-and-seat-projections-1.4171977) uses almost every type of graph you'll find in this notebook.

We'll also explore how a graph can be used to [mislead someone](http://teachersinstitute.yale.edu/curriculum/units/2008/6/08.06.06.x.html). We hope this notebook will help you learn how to avoid using misleading graphs, as well as how to avoid being misled yourself.

There's even a [wall of shame](http://bcuchta.com/wall_of_shame/) with some of the worst graphs and charts!

Let's look at bar graphs

What is a bar graph?

A bar graph is a graph where data is separated into categories, and those categories are shown as bars with different heights.
It's a very useful graph, but it can also easily be misleading.

from [Math is Fun](https://mathsisfun.com/data/images/bar-graph-fruit.svg)

When are bar graphs good to use?

Bar graphs can be used in many ways, but they usually show one piece of information collected from many groups or categories. For example, they might show the number of hours worked by people in different age groups, or how many grey shirts each girl in a class owns.

What are some ways to misuse bar graphs?
1. **Make the scale on the graph start above zero.** This makes small differences between bars look much bigger than they really are.
2. **Change the width of the bars to make one category look more important.** This gives one bar more area, which looks like more data.
3. **Remove the space between the bars** (that's a **histogram**). Histograms are used for a different kind of data set, and so they are read in a different way.

Here's an example of a poorly made bar graph. It shows the total welfare (support money) received by people in the US from 2009 to 2011. Each year is divided into 4 three-month pieces called **quarters**.

from [MediaMatters](https://www.mediamatters.org/fox-news/today-dishonest-fox-charts-government-aid-edition)

What makes this a bad bar graph?
1. Their scale starts at 94 million instead of 0.
2. The bars are in 3D, making their values harder to read.
3. Their y-axis has 8 labels, but there are 10 levels on the graph (including the top and bottom).

Whoever made this graph probably wanted the viewer to think welfare in the US is rising faster than it really is. Now, let's ask ourselves:
- What can we change to make this a good graph?
- How does it look after we make these changes?
- Why might the original creators not want to use our new graph?

One way we can improve this graph is by changing where its scale starts. Play with the slider below to see how the graph looks with different scales.

*Slide the slider below to change the starting point of the $y$-axis. The initial position corresponds to the graph in the image above. As you move the slider to the left, the starting point for the $y$-axis is reduced to zero.*

*Warning: This graph is slow to respond, please be patient with it.*
###Code
hide_me
columns = ['09-Q1', '09-Q2', '09-Q3', '09-Q4', '10-Q1', '10-Q2','10-Q3', '10-Q4', '11-Q1', '11-Q2']
#fig, ax = plt.subplots()
def plot(yaxis=94):
y = [97, 98, 99, 101, 104, 105, 106, 107, 107.5, 108]
x = np.arange(len(y))
fig, ax = plt.subplots(figsize=(10,4))
ax.bar(x, y, width=0.5)
ax.set_xticks(x)
ax.set_xticklabels(columns)
ax.set_ylim((yaxis,110))
ax.set_title("Federal Welfare Received in the US")
interact(plot, yaxis=widgets.IntSlider(value=94, min=0, max=94, step=1, continuous_update=False))
#plt.show()
###Output
_____no_output_____
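###Markdown
Here's a quick calculation, using the approximate bar values from the code above, that shows how much bigger the jump from the first bar to the last bar *looks* when the scale starts at 94 instead of 0:
###Code
first_bar, last_bar, scale_start = 97, 108, 94
actual_increase = (last_bar - first_bar) / first_bar * 100
apparent_increase = (last_bar - first_bar) / (first_bar - scale_start) * 100
print(f"Actual increase from the first bar to the last: about {actual_increase:.0f}%")
print(f"Apparent increase (how much taller the last bar is drawn when the scale starts at 94): about {apparent_increase:.0f}%")
###Output
_____no_output_____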
###Markdown
Let's look at pictographs What is a pictograph?A pictograph is a way to show data using images, where each image represents a certain number of things that are being measured. They look a lot like bar graphs and they can be horizontal or vertical too.from [Math is Fun](https://www.mathsisfun.com/data/images/pictograph-tennis.svg) Why do people like to use pictographs?The main reason is that the pictures offer something for readers to connect with other than just a row of coloured bars.Also, pictographs often work best to show data with small numbers. If everything can be easily expressed with a simple scale like the one above, then a pictograph might be the right choice to represent the data. When are pictographs not a good choice?In the example above, what if Sam played 46 games instead of 45? This pictogram counts games in steps of 5, so numbers in between these steps might be hard or impossible to show.A reader might also make a connection with a pictograph that wasn't intended. Let's show this with an example.On Halloween, Shayna and Michael went trick-or-treating. Shayna got 18 pieces of candy, and Michael got 36. Their totals are shown in this pictograph:from [teachersinstitute.yale.edu](http://teachersinstitute.yale.edu/curriculum/units/2008/6/08.06.06.x.html)At first, it looks like a fine way to show how much candy each child got. The heights of the candy corn pieces are being used to mark the two amounts. But as a viewer, we don't see just the height—we also see the width. Not only is the second candy corn twice as high, it's also twice as wide, giving it four times the area of the first candy corn. This makes it *look like* Michael got 4 times as much candy as Shayna, even though he only got twice as much.Click the "Display" button below to show a better, more accurate way to represent the same data:
###Code
hide_me
pic = Image('images/CandyCornGraph.png')
clicker = widgets.Checkbox(value=False, description='Display', disabled=False)
def checking(a):
if clicker.value == True:
IPython.display.display(pic)
else:
IPython.display.clear_output()
IPython.display.display(clicker)
IPython.display.display(clicker)
clicker.observe(checking, 'value')
###Output
_____no_output_____
###Markdown
Let's look at line graphs What is a line graph?A line graph is a way to show how the measurement of one value responds to changes in another, like how something may change over time. In fact, time is one of the most common variables with a line graph.from [Math is Fun](https://www.mathsisfun.com/data/images/line-graph-example.svg) Why are line graphs useful?They show a moving trend with a line that's easy to follow, instead of just dots on a graph. They work best when the trend involves the change of one variable (jobs, temperature, debt) with respect to another (usually time).In some cases it can also be useful to plot multiple lines on one graph, usually with different colours to help tell them apart. For example, one might plot polling results for different political parties over time, as with this graph from the CBC:from [cbc.ca](http://www.cbc.ca/polopoly_fs/1.3265490!/fileImage/httpImage/image.jpg) How can line graphs go wrong?A common error with line graphs is unlabelled axes. A graph might show a line that slopes upwards, but without labels, we wouldn't know what is growing or how fast. Also, line graphs can trick us into thinking a trend is linear by spacing out the ticks unevenly on one axis, so that the data points neatly line up. Like this example:from [Online Stat Book](http://onlinestatbook.com/2/graphing_distributions/graphics/graph2.png)
###Code
hide_me
fix = widgets.SelectionSlider(options=['original', 'fixed'], value ='original', description='Slide to fix',
continuous_update=True, orientation='horizontal',)
def fixing(a):
if fix.value == 'fixed':
IPython.display.clear_output()
IPython.display.display(fix)
f, ax1 = plt.subplots(1,1,figsize=(10,5))
ax1.set_title("Job Loss by Quarter")
ax1.set_xlabel('Months from November 07',fontsize=15)
ax1.set_ylabel("Jobs Lost in Millions",color='b',fontsize=15)
x1 = [1, 10, 16, 29]
y1 = [7,9,13.5,15]
ax1.plot(x1, y1,"bo-")
plt.legend()
plt.show()
else:
IPython.display.clear_output()
IPython.display.display(fix)
f, ax1 = plt.subplots(1,1,figsize=(10,5))
ax1.set_title("Job Loss by Quarter")
ax1.set_xlabel('Months from November 07',fontsize=15)
ax1.set_ylabel("Jobs Lost in Millions",color='b',fontsize=15)
x1 = [0,7,23,29]
y1 = [7,9,13.5,15]
ax1.plot(x1, y1,"bo-")
plt.legend()
plt.show()
IPython.display.display(fix)
fix.observe(fixing, 'value')
IPython.display.clear_output()
IPython.display.display(fix)
f, ax1 = plt.subplots(1,1,figsize=(10,5))
ax1.set_title("Job Loss by Quarter")
ax1.set_xlabel('Months from November 07',fontsize=15)
ax1.set_ylabel("Jobs Lost in Millions",color='b',fontsize=15)
x1 = [0,7,23,29]
y1 = [7,9,13.5,15]
ax1.plot(x1, y1,"bo-")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Let's look at circle graphs What is a circle graph?Also known as a pie chart, a circle graph is used to show how a total is split into different groups. The whole pie represents the total, and each slice of the pie represents a different group. Each slice gets as much of the pie as its group has of the total—the bigger the slice, the more of the total that group represents.from [Math is Fun](https://www.mathsisfun.com/data/images/pie-chart-movies.svg) Why are circle graphs useful?They make it easy to compare group sizes; if there's a biggest or smallest group, that's easy to see, since group sizes are shown as pieces of a whole. Why might people not use circle graphs?To be displayed as a circle graph, data must be converted into percentages of the total, then into slices of a circle, which is more work than other graphs need. Plus, it's easy to mess up if the data are not converted properly (or at all). Circle graphs are also hard to draw accurately on paper, since you need a protractor to ensure your angles are correct. Some people might even say that any time a circle graph would do, a bar graph would do better, and that the pie chart below is the only acceptable one.from [Flowing Data](https://i1.wp.com/flowingdata.com/wp-content/uploads/2008/09/Pie-I-have-Eaten.jpg) ******************** What's wrong with these graphs?[Business Insider](https://amp.businessinsider.com/images/51cb26c469beddf14c000015-750-453.jpg)[Flowing Data](http://flowingdata.com/wp-content/uploads/yapb_cache/app15725951258947184.acq6gmp0hf4sowckg80ssc8wg.2xne1totli0w8s8k0o44cs0wc.th.png)[Flowing Data](https://i0.wp.com/flowingdata.com/wp-content/uploads/2013/03/130207SuperBowlPoll.jpg?fit=500%2C430&ssl=1) What was wrong with these graphs?1. Mislabeled/Missing axes2. Plotted wrong3. Hard to read4. Numbers don't add to 100%5. Wrong data shown The video below goes through several examples of bad/misleading graphs (some of them shown in this notebook) and why they are not good representations of the original data.
###Code
hide_me
YouTubeVideo('1F7gm_BG0iQ')
###Output
_____no_output_____
###Markdown
************* Practice Questions Question 1A group of kids was asked **what they do first** when they get home from school. The data are shown in the table below. [Data source here](http://www.ur.umich.edu/9900/Apr03_00/7.htm)| Activity | Percent||-----------------|-----|| Eat | 27% || Personal Care | 19% || Watch TV | 15% || Study | 13% || Play | 9% || Other | 17% |
###Code
hide_me
answer = widgets.RadioButtons(options=['','circle graph', 'line graph', 'bar graph', 'pictograph'],
value='', description='Answer:')
labels = ['Eat', 'Personal Care', 'Watch TV', 'Study', 'Play', 'Other']
data = [0.27, 0.19, 0.15, 0.13, 0.09, 0.17]
def display():
print('What would be the best graph to display this set of data?')
IPython.display.display(answer)
def check(a):
IPython.display.clear_output(wait=False)
display()
if answer.value == 'circle graph':
print("Correct! Circle graphs are used for percentages.")
print("Let's see this data in a circle graph.")
patches, texts = plt.pie(data, labels=labels)
plt.axis('equal')
plt.tight_layout()
plt.show()
else:
if answer.value == 'bar graph':
print("A Bar graph would work, but there's a better option. Try again.")
else:
if answer.value == 'line graph':
print("Line graphs are good for change over time, not percentages. Try again.")
else:
print("A pictograph would work if the data was in amounts instead of percentages. Try again.")
display()
answer.observe(check, 'value')
###Output
_____no_output_____
###Markdown
Question 2A group of kids was asked **how much time** they spend doing different activities after school. The data are shown in the table below. [Data source here](http://www.ur.umich.edu/9900/Apr03_00/7.htm)| Activity | Time spent (minutes)||-----------------|-----|| Reading | 30 || Chores | 30 || Watch TV | 100 || Study | 60 || Play | 74 || Sports | 60 |
###Code
hide_me
answer2 = widgets.RadioButtons(options=['','circle graph', 'line graph', 'bar graph', 'pictograph'],
value='', description='Answer:')
labels2 = ['Reading', 'Chores', 'Watch TV', 'Study', 'Play', 'Sports']
data2 = [30, 30, 100, 60, 74, 60]
x = np.arange(len(data2))
def display2():
print('What would be the best graph to display this set of data?')
IPython.display.display(answer2)
def check(a):
IPython.display.clear_output(wait=False)
display2()
if answer2.value == 'circle graph':
print("A circle graph is used for percentages. Try again.")
else:
if answer2.value == 'bar graph':
print("Correct! A bar graph shows the relation between both parameters in an easy to read format.")
print("Let's see what that looks like.")
plt.bar(x, data2, width = .3)
plt.xticks(x, labels2)
plt.ylabel('Time in Minutes')
plt.title('Time Spent on Afterschool Activities')
plt.show()
else:
if answer2.value == 'line graph':
print("Line graphs are good for change over time. Try again.")
else:
print("A pictograph would work, but there's a better option to be more accurate. Try again.")
display2()
answer2.observe(check, 'value')
###Output
_____no_output_____ |
datascience/stats_hypothesis_testing_1.ipynb | ###Markdown
Hypothesis testing Problem 1 An automatic bottling machine fills cola into 2 L (2000 cm3) bottles. A consumer advocate wants to test the null hypothesis that the average amount filled by the machine into the bottle is at least 2,000 cm3. A random sample of 40 bottles coming out of the machine was selected and the exact content of the selected bottles was recorded. The sample mean was 1,999.6 cm3. The population standard deviation is known from past experience to be 1.30 cm3. 1. Test the null hypothesis at an alpha of 5% Solution 1 Null Hypothesis (ho), Alternate Hypothesis (ha): ho: mu >= 2000 cm3 (population mean, mu = 2000 cm3); ha: mu < 2000 cm3
###Code
xbar = 1999.6
import scipy.stats as st
import math as m
n = 40
pop_std = 1.3 # sigma
p = st.norm.cdf(1999.6, loc=2000, scale=1.3/m.sqrt(n))
p
###Output
_____no_output_____
###Markdown
Inference: p-value is less than alpha; hence we reject the null hypothesis Continuation problem Assume that the population is normally distributed with the same sd of 1.30 cm3. Assume that the sample size is only 20 but the sample mean is the same 1,999.6 cm3. Conduct the test once again at an alpha of 5%.
###Code
# the population sd is known, so the normal model is used again, now with n = 20
p = st.norm.cdf(1999.6, 2000, 1.3/m.sqrt(20))
p
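# For reference: the corresponding z statistic is about -1.38, giving p ≈ 0.084,
# which is greater than alpha = 0.05, so with n = 20 we fail to reject the null hypothesis.
z = (xbar - 2000) / (pop_std / m.sqrt(20))
print('z =', round(z, 3), ' p-value =', round(st.norm.cdf(z), 4))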
###Output
_____no_output_____ |
Week4/Assignment 4B/Model6.ipynb | ###Markdown
###Code
import time
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from keras.datasets import cifar10
#loading data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
num_train, img_rows, img_cols, img_channels = x_train.shape  # CIFAR-10 arrays are channels-last: (N, 32, 32, 3)
num_test = x_test.shape[0]
num_classes = len(np.unique(y_train))
# Classes
class_names = ['airplane', 'automobile', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# multi line f-string : https://stackoverflow.com/questions/45965007/multiline-f-string-in-python
print(f"""
Train images : {num_train}
Test images : {num_test}
Total classes : {num_classes}
Input image shape : {x_train.shape[1:]}
""")
# checking some random images
fig = plt.figure(figsize=(10,5))
for i in range(num_classes):
ax = fig.add_subplot(2, 5, i+1, xticks=[], yticks=[])
idx = np.where(y_train[:]==i)[0] # selecting image_idx of single class
features = x_train[idx,::]
img_num = np.random.randint(features.shape[0])
im = features[img_num]
ax.set_title(class_names[i])
plt.imshow(im)
plt.show()
# display n random images for each class
n = 10
r, c = 10, n
fig = plt.figure(figsize=(10,10))
fig.subplots_adjust(hspace=0.01, wspace=0.01)
for i in range(num_classes):
idx = np.random.choice(np.where(y_train[:]==i)[0], c, replace=False)
ax = plt.subplot(r, c, i*c+1)
ax.text(-1.5, 0.5, class_names[i], fontsize=14)
plt.axis('off')
for j in range(1, c):
plt.subplot(r, c, i*c+j+1)
plt.imshow(x_train[idx[j-1]], interpolation='none')
plt.axis('off')
plt.show()
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
version = 2
batch_size = 128
epochs = 50
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True # Subtracting pixel mean improves accuracy
depth = 20
# Model name, depth and version
model_type = 'ResNet%dv%d' % (depth, version)
# Normalizing data
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def plot_model_history(model_history):
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
# summarize history for accuracy
ax[0].plot(range(1, len(model_history.history['acc'])+1), model_history.history['acc'])#, label='Training Acc')
ax[0].plot(range(1, len(model_history.history['val_acc'])+1), model_history.history['val_acc'])#, label='Validation Acc')
ax[0].set_title('Model Accuracy')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Accuracy')
ax[0].set_xticks(np.arange(1, len(model_history.history['acc'])+1), len(model_history.history['acc'])/10)
ax[0].legend(['Train', 'Validation'], loc='best')
# summarize history for loss
ax[1].plot(range(1, len(model_history.history['loss'])+1), model_history.history['loss'])
ax[1].plot(range(1, len(model_history.history['val_loss'])+1), model_history.history['val_loss'])
ax[1].set_title('Model Loss')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Loss')
ax[1].set_xticks(np.arange(1, len(model_history.history['loss'])+1), len(model_history.history['loss'])/10)
ax[1].legend(['Train', 'Validation'], loc='best')
plt.show()
def lr_schedule(epoch):
lr = round(0.003 * pow(0.5, epoch//8), 10) # halving lr every 8 epochs
print ('Learning rate is : ', lr)
return lr
# print([ lr_schedule(e) for e in range(50)])
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""
2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
doubled. Within each stage, the layers have the same number filters and the
same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
model = resnet_v2(input_shape=x_train.shape[1:], depth=depth)
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
model.summary()
print(model_type)
# Prepare model model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer, lr_scheduler]
# Run training, with or without data augmentation.
if not data_augmentation:
print('Not using data augmentation.')
model_info = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly rotate images in the range (deg 0 to 180)
rotation_range=0,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set range for random shear
shear_range=0.,
# set range for random zoom
zoom_range=0.,
# set range for random channel shifts
channel_shift_range=0.,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# value used for fill_mode = "constant"
cval=0.,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model_info = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
validation_data=(x_test, y_test),
epochs=50, verbose=1, workers=4,
callbacks=callbacks)
# plot model history
plot_model_history(model_info)
plt.plot([ lr_schedule(e) for e in range(50)])
plt.xlabel('No. of epochs')
plt.ylabel('Learning rate')
plt.show()
###Output
_____no_output_____
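###Markdown
Before moving on, it is worth scoring the trained network on the held-out test set (a small added check; it uses the `model`, `x_test`, and `y_test` already defined above).
###Code
# Evaluate the trained model on the normalized, mean-subtracted test set
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', test_loss)
print('Test accuracy:', test_acc)
###Output
_____no_output_____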
###Markdown
GradCAM
###Code
from skimage import io
import cv2
from keras.preprocessing import image
from keras.applications.resnet import preprocess_input, decode_predictions
from google.colab.patches import cv2_imshow
url = 'https://www.rspcapetinsurance.org.au/rspca/media/images/hero/dog-insurance-hero.jpg'
url_class = 'dog'
def get_gradcam(url, url_class):
# read and resize image
img = io.imread(url)
orig_shape = (img.shape[1], img.shape[0])
print('Input url :', url)
print('=='*36)
print('Label :',url_class)
print('Original image')
cv2_imshow(img)
print('--'*81)
img = cv2.resize(img, dsize=(32,32), interpolation=cv2.INTER_CUBIC) # resize img to model input shape
# Normalizing and changing shape for model prediction
pred_img = image.img_to_array(img/255)
pred_img = np.expand_dims(pred_img, axis=0)
# If subtract pixel mean is enabled
if subtract_pixel_mean:
pred_img -= x_train_mean
y_pred = model.predict(pred_img)
class_idx = np.argmax(y_pred[0])
print('Predicted Label :',class_names[class_idx])
class_output = model.output[:, class_idx]
last_conv_layer = model.get_layer('conv2d_22')
grads = K.gradients(class_output, last_conv_layer.output)[0]
# find the average for each channel of gradients
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# get iterator to parse input and compute the tensor
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
# actually compute for the given input
pooled_grads_value, conv_layer_output_value = iterate([pred_img])
# multiple the pooled gradients with each channel output
for i in range(64):
conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
# add all channels and divide by 64, i.e. the mean as heatmap
heatmap = np.mean(conv_layer_output_value, axis = -1)
# apply RELU, i.e. reject all negative values and normalize by dividing by maximum
# so that all values are between -0 and 1
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
# resize heatmap to actual image resolution i.e. 32x32 and scale from 0 to 255 with colormap
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
# superimpore input and output
superimposed_img = cv2.addWeighted(img, 0.5, heatmap, 0.5, 0)
# scale superimposed image for clarity 4 times
superimposed_img = cv2.resize(superimposed_img, orig_shape)
print('GradCAM Heat Maps based on Network activations for class')
cv2_imshow(superimposed_img)
print('**'*18)
print('**'*18)
get_gradcam(url, url_class)
airplanes = ['https://images.unsplash.com/photo-1543903905-cee4ab46985c', 'https://images.unsplash.com/photo-1559711469-31b420b24c10', 'https://images.unsplash.com/photo-1559023234-1e773470544f', 'https://images.unsplash.com/photo-1562368370-cff10978a647', 'https://images.unsplash.com/photo-1531642765602-5cae8bbbf285']
cars = ['https://images.unsplash.com/photo-1523676060187-f55189a71f5e', 'https://images.unsplash.com/photo-1532974297617-c0f05fe48bff', 'https://images.unsplash.com/photo-1485291571150-772bcfc10da5', 'https://images.unsplash.com/photo-1489824904134-891ab64532f1', 'https://images.unsplash.com/photo-1503376780353-7e6692767b70', 'https://images.unsplash.com/photo-1529369623266-f5264b696110']
birds = ['https://images.unsplash.com/photo-1448227922836-6d05b3f8b663', 'https://images.unsplash.com/photo-1433321768402-897b0324c ', 'https://images.unsplash.com/photo-1507477338202-487281e6c27e', 'https://images.unsplash.com/photo-1471602671180-19fb2b491359', 'https://images.unsplash.com/photo-1506220926022-cc5c12acdb35', 'https://images.unsplash.com/photo-1511692277506-3be3a7ab1686']
for url in airplanes:
get_gradcam(url, 'airplane')
for url in cars:
get_gradcam(url, 'automobile')
for url in birds:
get_gradcam(url, 'bird')
cat = ['https://images.unsplash.com/photo-1548681528-6a5c45b66b42', 'https://images.unsplash.com/photo-1543852786-1cf6624b9987', 'https://images.unsplash.com/photo-1514888286974-6c03e2ca1dba', 'https://images.unsplash.com/photo-1519052537078-e6302a4968d4', 'https://images.unsplash.com/photo-1501820488136-72669149e0d4', 'https://images.unsplash.com/photo-1526336024174-e58f5cdd8e13', 'https://images.unsplash.com/photo-1513360371669-4adf3dd7dff8']
deer = ['https://images.unsplash.com/photo-1484406566174-9da000fda645', 'https://images.unsplash.com/photo-1537694513497-5f0b0ec361c7', 'https://images.unsplash.com/photo-1542890886-40c9094e352a', 'https://images.unsplash.com/photo-1565440707934-c9bacbad2146']
dog = ['https://images.unsplash.com/photo-1534361960057-19889db9621e', 'https://images.unsplash.com/photo-1548199973-03cce0bbc87b']
for url in cat:
get_gradcam(url, 'cat')
for url in deer:
get_gradcam(url, 'deer')
###Output
_____no_output_____ |
01_ipythonNotebook_intro.ipynb | ###Markdown
Jupyter Notebook Introduction Objectives - Become familiar with the **Jupyter Notebook**.- Introduce the **landscape**.- How do you currently: * wrangle data? * visualize results? * Analysis: machine learning, stats * Parallel computing * Big data What is Python? Python is a general-purpose programming language that blends procedural, functional, and object-oriented paradigmsMark Lutz, Learning Python * Simple, clean syntax* Easy to learn* Interpreted* Strong, dynamically typed* Runs everywhere: Linux, Mac, and Windows* [Free](http://www.fsf.org/) and open* Expressive: do more with fewer lines of code Abstractions Jupyter Notebook Interactive web-based computing, data analysis, and documentation.- One document for code and output- Document process- Share results Locally and Remote - Run locally- Connect to the cloud (e.g [AWS](http://aws.amazon.com/))- Connect to supercomputer (e.g. XSEDE Resource) Keyboard Shortcuts Images This is an image: Video
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('F4rFuIb1Ie4', start=2400, width=600, height=400)
###Output
_____no_output_____
###Markdown
Magic Commands - Built-in useful functions- `%` line commands- `%%` cell commands
###Code
%lsmagic
###Output
_____no_output_____
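###Markdown
For example, a line magic such as `%timeit` times a single statement, while the `%%` form applies to a whole cell:
###Code
%timeit sum(range(1000))
###Output
_____no_output_____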
###Markdown
Write to a file.
###Code
%%writefile example.py
def hello():
print 'hello'
###Output
Writing example.py
###Markdown
Load a file into a notebook.
###Code
%load example.py
def hello():
print 'hello'
def hello():
print 'hello'
hello()
###Output
hello
###Markdown
Execute a file Great for working with your own files.
###Code
%%writefile example.py
def get():
return 10
A = get()
%run example.py
print A
###Output
10
###Markdown
**Notice** that `A` is now available. `import` and `reload` Great for working with your own `modules`.
###Code
import example as ex
reload(ex)
print ex.get()
###Output
10
###Markdown
Other Languages Bash
###Code
%%bash
ls -l
###Output
_____no_output_____
###Markdown
R
###Code
%load_ext rpy2.ipython
%%R
3+1
help()
###Output
_____no_output_____ |
sliderule_dsi_inferential_statistics_exercise_3.ipynb | ###Markdown
Hospital Readmissions Data Analysis and Recommendations for Reduction BackgroundIn October 2012, the US government's Center for Medicare and Medicaid Services (CMS) began reducing Medicare payments for Inpatient Prospective Payment System hospitals with excess readmissions. Excess readmissions are measured by a ratio, by dividing a hospital’s number of “predicted” 30-day readmissions for heart attack, heart failure, and pneumonia by the number that would be “expected,” based on an average hospital with similar patients. A ratio greater than 1 indicates excess readmissions. Exercise DirectionsIn this exercise, you will:+ critique a preliminary analysis of readmissions data and recommendations (provided below) for reducing the readmissions rate+ construct a statistically sound analysis and make recommendations of your own More instructions provided below. Include your work **in this notebook and submit to your Github account**. Resources+ Data source: https://data.medicare.gov/Hospital-Compare/Hospital-Readmission-Reduction/9n3s-kdb3+ More information: http://www.cms.gov/Medicare/medicare-fee-for-service-payment/acuteinpatientPPS/readmissions-reduction-program.html+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet****
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import bokeh.plotting as bkp
from mpl_toolkits.axes_grid1 import make_axes_locatable
# read in readmissions data provided
hospital_read_df = pd.read_csv('data/cms_hospital_readmissions.csv')
###Output
_____no_output_____
###Markdown
**** Preliminary Analysis
###Code
# deal with missing and inconvenient portions of data
clean_hospital_read_df = hospital_read_df[hospital_read_df['Number of Discharges'] != 'Not Available']
clean_hospital_read_df.loc[:, 'Number of Discharges'] = clean_hospital_read_df['Number of Discharges'].astype(int)
clean_hospital_read_df = clean_hospital_read_df.sort_values('Number of Discharges')
clean_hospital_read_df.head(5)
# generate a scatterplot for number of discharges vs. excess rate of readmissions
# lists work better with matplotlib scatterplot function
x = [a for a in clean_hospital_read_df['Number of Discharges'][81:-3]]
y = list(clean_hospital_read_df['Excess Readmission Ratio'][81:-3])
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(x, y,alpha=0.2)
ax.fill_between([0,350], 1.15, 2, facecolor='red', alpha = .15, interpolate=True)
ax.fill_between([800,2500], .5, .95, facecolor='green', alpha = .15, interpolate=True)
ax.set_xlim([0, max(x)])
ax.set_xlabel('Number of discharges', fontsize=12)
ax.set_ylabel('Excess rate of readmissions', fontsize=12)
ax.set_title('Scatterplot of number of discharges vs. excess rate of readmissions', fontsize=14)
ax.grid(True)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
**** Preliminary ReportRead the following results/report. While you are reading it, think about if the conclusions are correct, incorrect, misleading or unfounded. Think about what you would change or what additional analyses you would perform.**A. Initial observations based on the plot above**+ Overall, rate of readmissions is trending down with increasing number of discharges+ With lower number of discharges, there is a greater incidence of excess rate of readmissions (area shaded red)+ With higher number of discharges, there is a greater incidence of lower rates of readmissions (area shaded green) **B. Statistics**+ In hospitals/facilities with number of discharges < 100, mean excess readmission rate is 1.023 and 63% have excess readmission rate greater than 1 + In hospitals/facilities with number of discharges > 1000, mean excess readmission rate is 0.978 and 44% have excess readmission rate greater than 1 **C. Conclusions**+ There is a significant correlation between hospital capacity (number of discharges) and readmission rates. + Smaller hospitals/facilities may be lacking necessary resources to ensure quality care and prevent complications that lead to readmissions.**D. Regulatory policy recommendations**+ Hospitals/facilties with small capacity (< 300) should be required to demonstrate upgraded resource allocation for quality care to continue operation.+ Directives and incentives should be provided for consolidation of hospitals and facilities to have a smaller number of them with higher capacity and number of discharges. **** ExerciseInclude your work on the following **in this notebook and submit to your Github account**. A. Do you agree with the above analysis and recommendations? Why or why not? B. Provide support for your arguments and your own recommendations with a statistically sound analysis: 1. Setup an appropriate hypothesis test. 2. Compute and report the observed significance value (or p-value). 3. Report statistical significance for $\alpha$ = .01. 4. Discuss statistical significance and practical significance. Do they differ here? How does this change your recommendation to the client? 5. Look at the scatterplot above. - What are the advantages and disadvantages of using this plot to convey information? - Construct another plot that conveys the same information in a more direct manner.You can compose in notebook cells using Markdown: + In the control panel at the top, choose Cell > Cell Type > Markdown+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet****
###Code
# Your turn
# select only columns that are I'm interested
df = clean_hospital_read_df[['Number of Discharges','Excess Readmission Ratio', \
'Predicted Readmission Rate','Expected Readmission Rate','Number of Readmissions']]
# remove data with 4 columns with NaN (null)
df = df.dropna(how='any')
###Output
_____no_output_____
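###Markdown
A quick added sanity check: how many hospitals remain after dropping rows with missing values, before any tests are run.
###Code
print('Hospitals remaining after dropping missing values:', df.shape[0])
df[['Number of Discharges', 'Excess Readmission Ratio']].describe()
###Output
_____no_output_____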
###Markdown
###Code
from scipy import stats
# define 1000 as high number of discharges and less than 300 as low
df_hi_dsch = df[df['Number of Discharges'] >= 1000]
df_lo_dsch = df[df['Number of Discharges'] <= 300]
# compute the mean for checking
print('Mean Excess Readmission (high discharges): ',df_hi_dsch['Excess Readmission Ratio'].mean())
print('Mean Excess Readmission (low discharges): ',df_lo_dsch['Excess Readmission Ratio'].mean())
print('Percent of hospitals with high number of discharges and excess readmission ratio > 1 = ', \
      (df_hi_dsch[df_hi_dsch['Excess Readmission Ratio'] > 1.0].count()[0])/df_hi_dsch.count()[0]*100)
print('Percent of hospitals with low number of discharges and excess readmission ratio > 1 = ', \
      (df_lo_dsch[df_lo_dsch['Excess Readmission Ratio'] > 1.0].count()[0])/df_lo_dsch.count()[0]*100)
# compute the t-test pvalue
print('Compare number of dscharges p-value=', \
stats.ttest_ind(df_hi_dsch['Excess Readmission Ratio'], df_lo_dsch['Excess Readmission Ratio'], equal_var = False))
# construct scatterplot
sns.lmplot('Number of Discharges','Excess Readmission Ratio', df_hi_dsch)
sns.plt.title('Scatterplot of High number of discharges vs. excess rate of readmissions', fontsize=14)
sns.lmplot('Number of Discharges','Excess Readmission Ratio', df_lo_dsch)
sns.plt.title('Scatterplot of Low number of discharges vs. excess rate of readmissions', fontsize=14)
# construct correlation matrix
corrmat = df.corr()
print(corrmat)
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=.8, square=True)
import numpy as np
import scipy as sc
def cohens_d(x, y):
lx = len(x)- 1
ly = len(y)- 1
md = np.abs(x.mean() - y.mean()) ## mean difference (numerator)
csd = lx * x.var() + ly * y.var()
csd = csd/(lx + ly)
#print(md)
csd = np.sqrt(csd) ## common sd computation
return md/csd ## cohen's d
def printCohen(x):
if x >= .80:
print("large effect")
elif x >= .50:
print("medium effect")
elif x >= .20:
print("small effect")
else: print("no effect")
return x
cd=cohens_d(df_hi_dsch, df_lo_dsch)
print(df_hi_dsch.dtypes.index[1], 'cohen''s d ratio= ',printCohen(cd[1]))
print('test Pearson r: ', sc.stats.pearsonr(df['Number of Discharges'],df['Excess Readmission Ratio']))
###Output
small effect
Excess Readmission Ratio cohens d ratio= 0.419738200509
test Pearson r: (-0.097397943510793533, 1.222547377680967e-25)
###Markdown
Hospital Readmissions Data Analysis and Recommendations for Reduction BackgroundIn October 2012, the US government's Center for Medicare and Medicaid Services (CMS) began reducing Medicare payments for Inpatient Prospective Payment System hospitals with excess readmissions. Excess readmissions are measured by a ratio, by dividing a hospital’s number of “predicted” 30-day readmissions for heart attack, heart failure, and pneumonia by the number that would be “expected,” based on an average hospital with similar patients. A ratio greater than 1 indicates excess readmissions. Exercise DirectionsIn this exercise, you will:+ critique a preliminary analysis of readmissions data and recommendations (provided below) for reducing the readmissions rate+ construct a statistically sound analysis and make recommendations of your own More instructions provided below. Include your work **in this notebook and submit to your Github account**. Resources+ Data source: https://data.medicare.gov/Hospital-Compare/Hospital-Readmission-Reduction/9n3s-kdb3+ More information: http://www.cms.gov/Medicare/medicare-fee-for-service-payment/acuteinpatientPPS/readmissions-reduction-program.html+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet****
###Code
%matplotlib inline
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import bokeh.plotting as bkp
from mpl_toolkits.axes_grid1 import make_axes_locatable
# read in readmissions data provided
hospital_read_df = pd.read_csv('data/cms_hospital_readmissions.csv')
###Output
_____no_output_____
###Markdown
**** Preliminary Analysis
###Code
# deal with missing and inconvenient portions of data
clean_hospital_read_df = hospital_read_df[hospital_read_df['Number of Discharges'] != 'Not Available']
clean_hospital_read_df.loc[:, 'Number of Discharges'] = clean_hospital_read_df['Number of Discharges'].astype(int)
clean_hospital_read_df = clean_hospital_read_df.sort_values('Number of Discharges')
# generate a scatterplot for number of discharges vs. excess rate of readmissions
# lists work better with matplotlib scatterplot function
x = [a for a in clean_hospital_read_df['Number of Discharges'][81:-3]]
y = list(clean_hospital_read_df['Excess Readmission Ratio'][81:-3])
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(x, y,alpha=0.2)
ax.fill_between([0,350], 1.15, 2, facecolor='red', alpha = .15, interpolate=True)
ax.fill_between([800,2500], .5, .95, facecolor='green', alpha = .15, interpolate=True)
ax.set_xlim([0, max(x)])
ax.set_xlabel('Number of discharges', fontsize=12)
ax.set_ylabel('Excess rate of readmissions', fontsize=12)
ax.set_title('Scatterplot of number of discharges vs. excess rate of readmissions', fontsize=14)
ax.grid(True)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
**** Preliminary ReportRead the following results/report. While you are reading it, think about if the conclusions are correct, incorrect, misleading or unfounded. Think about what you would change or what additional analyses you would perform.**A. Initial observations based on the plot above**+ Overall, rate of readmissions is trending down with increasing number of discharges+ With lower number of discharges, there is a greater incidence of excess rate of readmissions (area shaded red)+ With higher number of discharges, there is a greater incidence of lower rates of readmissions (area shaded green) **B. Statistics**+ In hospitals/facilities with number of discharges < 100, mean excess readmission rate is 1.023 and 63% have excess readmission rate greater than 1 + In hospitals/facilities with number of discharges > 1000, mean excess readmission rate is 0.978 and 44% have excess readmission rate greater than 1 **C. Conclusions**+ There is a significant correlation between hospital capacity (number of discharges) and readmission rates. + Smaller hospitals/facilities may be lacking necessary resources to ensure quality care and prevent complications that lead to readmissions.**D. Regulatory policy recommendations**+ Hospitals/facilties with small capacity (< 300) should be required to demonstrate upgraded resource allocation for quality care to continue operation.+ Directives and incentives should be provided for consolidation of hospitals and facilities to have a smaller number of them with higher capacity and number of discharges. **** ExerciseInclude your work on the following **in this notebook and submit to your Github account**. A. Do you agree with the above analysis and recommendations? Why or why not? B. Provide support for your arguments and your own recommendations with a statistically sound analysis: 1. Setup an appropriate hypothesis test. 2. Compute and report the observed significance value (or p-value). 3. Report statistical significance for $\alpha$ = .01. 4. Discuss statistical significance and practical significance. Do they differ here? How does this change your recommendation to the client? 5. Look at the scatterplot above. - What are the advantages and disadvantages of using this plot to convey information? - Construct another plot that conveys the same information in a more direct manner.You can compose in notebook cells using Markdown: + In the control panel at the top, choose Cell > Cell Type > Markdown+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet****
###Code
# Your turn
###Output
_____no_output_____
###Markdown
A. Do you agree with the above analysis and recommendations? Why or why not?**A. Initial observations based on the plot above**+ Overall, rate of readmissions is trending down with increasing number of discharges+ With lower number of discharges, there is a greater incidence of excess rate of readmissions (area shaded red)+ With higher number of discharges, there is a greater incidence of lower rates of readmissions (area shaded green) *****-> The first point is very hard to tell on the graph above, especially considering the handfull of outliers in the upper left area, shaded red. Ideally, a correlation and/or regression line should be used to confirm a trend.**- For the second two points these are specific statements about only a subsection of the data. Better would be to categorize 'number of discharges' into low and high, for example, and test whether they are different (e.g. t test).***B. Statistics**+ In hospitals/facilities with number of discharges < 100, mean excess readmission rate is 1.023 and 63% have excess readmission rate greater than 1 + In hospitals/facilities with number of discharges > 1000, mean excess readmission rate is 0.978 and 44% have excess readmission rate greater than 1 *****-> These are likely accurate statements, but don't seem to originate from statistical tests. If the given threshold values are important, then again, a t-test could be performed to check difference in proportion of excess readmission greater than 1.***C. Conclusions**+ There is a significant correlation between hospital capacity (number of discharges) and readmission rates. + Smaller hospitals/facilities may be lacking necessary resources to ensure quality care and prevent complications that lead to readmissions.*****-> I see no evidence in the report that suggests significant correlation of any factors. A significance test is required. The interpretation of this 'result' is therefore misleading.***D. Regulatory policy recommendations**+ Hospitals/facilties with small capacity (< 300) should be required to demonstrate upgraded resource allocation for quality care to continue operation.+ Directives and incentives should be provided for consolidation of hospitals and facilities to have a smaller number of them with higher capacity and number of discharges.*****-> Again, these applications of the 'analysis' are very misleading since they are not based on any significance test.* B. Provide support for your arguments and your own recommendations with a statistically sound analysis:B1. Setup an appropriate hypothesis test.Based on the direction of the provided report, it seems the critical issue is whether the excess readmissions ratio is larger or smaller depending on the size of the hospital. Although we aren't given the size (e.g. number of beds) for each hospital, we are given number of discharges. This number is most likely well correlated with hospital size. The above analysis makes policy implications based on the theshold of 300 discharges, so I will use this same threshold in my own analysis. Given the data, an appropriate hypothesis test would be:**Null hypothesis:** Excess readmission ratio for hospitals with discharge rates less than 300 ("small") is the same as that for hospitals with discharge rates greater than 300 ("large").**Alternative hypothesis:** Excess readmission ratio for hospitals with discharge rates less than 300 ("small") is NOT the same as that for hospitals with discharge rates greater than 300 ("large").
###Code
clean_hospital_read_df.head()
#subset dataframe by threshold value 300
small_df = clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 300]
large_df = clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 300]
#define series and means to be used in analysis for excess readmission ratio
small_ratio = small_df['Excess Readmission Ratio']
large_ratio = large_df['Excess Readmission Ratio']
small_ratio_mean = np.mean(small_df['Excess Readmission Ratio'])
large_ratio_mean = np.mean(large_df['Excess Readmission Ratio'])
#define series for discharge values
small_disch = small_df['Number of Discharges']
large_disch = large_df['Number of Discharges']
###Output
_____no_output_____
###Markdown
B2. Compute and report the observed significance value (or p-value).
###Code
# Define mean difference
mean_diff = small_ratio_mean - large_ratio_mean
print('Mean difference',mean_diff)
# Print sample size
print(str(large_ratio.shape[0]))
print(str(small_ratio.shape[0]))
# two-sample z statistic: the mean difference divided by its standard error
z_stat = mean_diff / np.sqrt(small_ratio.var()/small_ratio.size + large_ratio.var()/large_ratio.size)
print("z statistic:", z_stat)

p_val = 2*(1-stats.norm.cdf(np.abs(z_stat)))
p_val
###Output
_____no_output_____
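###Markdown
As a cross-check of the two-sample test above (an added sketch, not part of the original solution), scipy's Welch t-test on the same two groups should give a p-value of the same order; note that the NaN entries in the small-hospital ratios have to be dropped first.
###Code
# Welch two-sample t-test as a cross-check (NaN entries dropped first)
t_stat, p_check = stats.ttest_ind(small_ratio.dropna(), large_ratio.dropna(), equal_var=False)
print("t statistic:", t_stat, " p-value:", p_check)
###Output
_____no_output_____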
###Markdown
This p-value is < 0.05, so we reject the null hypothesis in favor of the alternative: readmission rates differ between small and large hospitals. B3. Report statistical significance for $\alpha$ = .01. The p-value is also below this threshold, so the result remains statistically significant at $\alpha$ = .01. B4. Discuss statistical significance and practical significance. Do they differ here? How does this change your recommendation to the client?
###Code
# Calculate relative difference between groups, percent difference
mean_diff_perc = (mean_diff/small_ratio_mean) * 100
print('Mean percent difference',mean_diff_perc)
# Calculate 95% confidence intervals for the mean excess readmission ratio.
# The small-hospital series still contains NaN entries; without dropping them the
# interval evaluates to (nan, nan), which is what the recorded output below shows.
small_clean = small_ratio.dropna()
small_conf = stats.t.interval(0.95, len(small_clean)-1, loc=small_clean.mean(), scale=stats.sem(small_clean))
large_conf = stats.t.interval(0.95, len(large_ratio)-1, loc=large_ratio_mean, scale=stats.sem(large_ratio))
print("95% Confidence interval, small hospitals:",small_conf)
print("95% Confidence interval, large hospitals:",large_conf)
###Output
95% Confidence interval, small hospitals: (nan, nan)
95% Confidence interval, large hospitals: (0.99707153996014497, 1.0024905476447004)
###Markdown
- The mean difference between the two sizes of hospital was 0.014. The increase in readmission for smaller hospitals was about 1% greater than for larger hospitals. - While the confidence intervals for readmission rates for small and large hospitals are not overlapping, they are very close.- Because the difference in readmission rates is so small, it may NOT be worth spending time and money on addressing this in the manner suggested in the previous analysis, which went as far as suggesting hospital closures. This seems extreme when considering the minor differences. B5. Look at the scatterplot above.- What are the advantages and disadvantages of using this plot to convey information?- Construct another plot that conveys the same information in a more direct manner. Advantages: - clear labels (axis and chart title)- scatter plot style usually allows reader to see all data pointsDisadvantages:- shaded areas are misleading- plot is crowded, and so it is hard to gather information about the general trends (a trendline might help)- data are not segmented by categories of interest (e.g. small and large hospitals)
###Code
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,10))
sns.boxplot(data=[small_df['Excess Readmission Ratio'],large_df['Excess Readmission Ratio']])
xmin,xmax=ax.get_xlim()
ymin,ymax=ax.get_ylim()
labels=['Small Hospitals','Large Hospitals']
plt.hlines(y=1.0,xmin=xmin,xmax=xmax,color='r')
ax.set_xlabel('Hospital Size',fontsize=20)
ax.set_ylabel('Readmission Rate',fontsize=20)
ax.set_xticklabels(labels)
ax.fill_between([xmin,xmax], 1,ymax, facecolor='orange', alpha = .15, interpolate=True)
ax.fill_between([xmin,xmax], ymin, 1, facecolor='blue', alpha = .15, interpolate=True)
fig, ax = plt.subplots(figsize=(10,10))
sns.regplot(large_df['Number of Discharges'], large_df['Excess Readmission Ratio'], scatter_kws={'alpha':0.15})
sns.regplot(small_df['Number of Discharges'], small_df['Excess Readmission Ratio'], scatter_kws={'alpha':0.15})
ax.set_xlabel('Hospital Size',fontsize=20)
ax.set_ylabel('Readmission Rate',fontsize=20)
###Output
_____no_output_____ |
final_kernels_project_solutions/prob3-sols.ipynb | ###Markdown
Question 3) Kernel PCAIn previous assignments, you've worked with PCA to find a lower dimensional representation of a data matrix, allowing us to perform tasks like classification more easily. As we said in the notes, kernels have a wide range of applications. In this problem, we'll take a look at the application of kernels to PCA. Question 3a)First, let's look at the half moon data again.
###Code
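# Imports assumed by the cells in this notebook extract (no setup cell is included above)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA, KernelPCA
from sklearn.linear_model import LogisticRegression
from ipywidgets import interact, FloatSlider, IntSlider
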
X, y = datasets.make_moons(n_samples = 500, noise = 0.04)
plt.scatter(X[:, 0], X[:, 1], c = y)
###Output
_____no_output_____
###Markdown
Run PCA on this dataset with 1 and 2 components, and visualize the result. Fill in the code such that `X_red` has the original data projected onto the first 2 principal components. Answer the following questions.* **Do you notice anything different about this graph? Why did this change happen?** The student should notice there is a slight change in rotation; this is because PCA finds the directions of greatest variance without regard to the rotation or orientation of the original space.
###Code
fig, axes = plt.subplots(1, 2, figsize=(18,4))
# START TODO
pca = PCA(n_components=2)
X_red = pca.fit_transform(X)
# END TODO
axes[0].scatter(X_red[:,0], np.zeros(X_red.shape[0]), c = y)
axes[1].scatter(X_red[:,0], X_red[:,1], c = y)
###Output
_____no_output_____
###Markdown
PCA, while it is able to identify the important directions in our data, is confined to a linear feature space. This means that we are still stuck with the problem where our dataset is linearly inseparable. As we know, it often helps to lift our features by mapping each datapoint to a higher dimensional space. Kernels will allow us to lift our features without ever having to explicitly compute the higher dimensional space for our data matrix. Instead, we can simply perform PCA on the Gram matrix K, which will give us the most important directions in this lifted feature space without having to go through the computational complexity of computing it. sk-learn has a built-in Kernel PCA implementation that we can use on our half moon dataset here https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.KernelPCA.html Complete the following code to finish the function `kernel_pca_gamma`, which takes in a value for `gamma` and computes a 2-component KernelPCA with that gamma and the RBF kernel on the data in `X`.
###Code
def kernel_pca_gamma(gamma):
# START TODO
kernel_pca = KernelPCA(n_components=2, kernel='rbf',gamma=gamma)
X_red_kernel = kernel_pca.fit_transform(X)
# END TODO
fig, axes = plt.subplots(1, 2, figsize=(18,4))
axes[0].scatter(X_red_kernel[:,0], np.zeros(X_red_kernel.shape[0]), c = y)
axes[1].scatter(X_red_kernel[:,0], X_red_kernel[:,1], c = y)
plt.show()
###Output
_____no_output_____
###Markdown
Now run the following code to visualize our results. Play with the gamma parameter and answer the following questions.* **What does the graph look like as gamma approaches infinity? What about negative infinity?** As gamma approaches infinity, we can see that the data becomes more separated with one class turning almost one dimensional. For negative values, the graph starts to spread out and get clustered around one point. In general, as gamma is increased we know from the equation for an RBF kernel that the kernel function will become very small, with the highest value being assumed when the two vectors passed in are the same. The opposite is true when gamma is very small.* **What happens when gamma is 0? Why does this make sense?** When gamma is zero, all the values are in one point at (0,0). This is because the kernel function always outputs 1 when gamma is equal to zero, which means that every point is essentially seen as the same. In other words, there are no important directions in the data.* **What is the value of gamma that visually seems like it would cause the data to be most separable?** A gamma around 15 is reasonable.* **What method can we use to find an optimal gamma to make this data separable?** We can use cross validation.
###Code
g_widget = FloatSlider(min=-10, max=20.0)
interact(kernel_pca_gamma,gamma=g_widget)
###Output
_____no_output_____
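###Markdown
To connect the widget above back to the Gram matrix description, here is a minimal "by hand" sketch of kernel PCA (an addition for illustration; the gamma value is just one that looks reasonable in the widget, and the signs of the components may be flipped relative to scikit-learn's output).
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import rbf_kernel

g = 15                                    # assumed value, based on the widget above
K = rbf_kernel(X, gamma=g)                # Gram matrix, shape (n, n)
n = K.shape[0]
one_n = np.ones((n, n)) / n
K_centered = K - one_n @ K - K @ one_n + one_n @ K @ one_n   # centre in feature space
eigvals, eigvecs = np.linalg.eigh(K_centered)                # eigenvalues in ascending order
top = np.argsort(eigvals)[::-1][:2]                          # indices of the two largest
X_manual = eigvecs[:, top] * np.sqrt(eigvals[top])           # projections onto the top components
plt.scatter(X_manual[:, 0], X_manual[:, 1], c=y)
plt.show()
###Output
_____no_output_____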
###Markdown
Question 3b)Fill in the code to use a polynomial kernel now, and answer the following questions.* **Try keeping the degree fixed and changing gamma. What do you notice happens as gamma gets to be a large positive number? What about to be a small negative number? What about 0?** The data is not separable at all with high gamma; with low gamma it is separable, but all the purple datapoints are projected to essentially the same place. The same behavior occurs as with RBF when gamma is 0.* **Now keep the gamma fixed and change the degree. What do you notice happens as the degree takes on even and odd values? What about small? What about 0?** When the degree is even, the purple points are projected into a circular disk, but when the degree is odd the projection sort of diverges. This difference comes from whether the polynomial feature map is an even or an odd function.
###Code
def kernel_pca_poly(degree, gamma):
# START TODO
kernel_pca = KernelPCA(n_components=2, kernel='poly',gamma=gamma,degree=degree)
X_red_kernel = kernel_pca.fit_transform(X)
# END TODO
fig, axes = plt.subplots(1, 2, figsize=(18,4))
axes[0].scatter(X_red_kernel[:,0], np.zeros(X_red_kernel.shape[0]), c = y)
axes[1].scatter(X_red_kernel[:,0], X_red_kernel[:,1], c = y)
plt.show()
g_widget = FloatSlider(min=-10, max=20.0)
d_widget = IntSlider(min=1, max=10)
interact(kernel_pca_poly,gamma=g_widget,degree=d_widget)
###Output
_____no_output_____
###Markdown
Question 3c)Now let's do some classification with Logistic Regression to see how well we can classify the original dataset, the dataset projected onto the first two principal components, and the dataset projected using the principal components from kernel PCA. First, let's look at the original dataset. Fill in the code in TODO such that we fit a Logistic Regression model and store the weights in a variable called `w`. Calculate the accuracy of the classifier on the dataset and store that in orig_accuracy.
###Code
# START TODO
clf = LogisticRegression().fit(X, y)
w = clf.coef_[0]
orig_accuracy = clf.score(X, y)
# END TODO
plt.scatter(X[:, 0], X[:, 1], c = y)
ax = plt.gca()
ax.autoscale(False)
x_vals = np.array(ax.get_xlim())
y_vals = -(x_vals * w[0])/w[1]
plt.plot(x_vals, y_vals, '--', c="red")
print ("Classifier accuracy: ", orig_accuracy)
###Output
Classifier accuracy: 0.884
###Markdown
Now do the same for the `X_red` dataset. Answer the following questions.* **Is the accuracy different?** It is essentially unchanged: about 0.882 here versus 0.884 on the original data.
###Code
# START TODO
clf = LogisticRegression().fit(X_red, y)
w = clf.coef_[0]
pca_accuracy = clf.score(X_red, y)
# END TODO
plt.scatter(X_red[:,0], X_red[:,1], c = y)
ax = plt.gca()
ax.autoscale(False)
x_vals = np.array(ax.get_xlim())
y_vals = -(x_vals * w[0])/w[1]
plt.plot(x_vals, y_vals, '--', c="red")
print ("Classifier accuracy: ", pca_accuracy)
###Output
Classifier accuracy: 0.882
###Markdown
Now let's use kernel PCA. Use PCA with an RBF kernel to transform the dataset and use the value for gamma you visually identified in Question 2b.* **How's the accuracy this time?** Very good: about 0.998.
###Code
# START TODO
# gamma chosen close to the visually identified value of about 15
gamma = 17
kernel_pca = KernelPCA(n_components=2, kernel='rbf', gamma=gamma)
X_red_kernel = kernel_pca.fit_transform(X)
clf = LogisticRegression().fit(X_red_kernel, y)
w = clf.coef_[0]
kpca_accuracy = clf.score(X_red_kernel, y)
# END TODO
plt.scatter(X_red_kernel[:,0], X_red_kernel[:,1], c = y)
ax = plt.gca()
ax.autoscale(False)
x_vals = np.array(ax.get_xlim())
y_vals = -(x_vals * w[0])/w[1]
plt.plot(x_vals, y_vals, '--', c="red")
print ("Classifier accuracy: ", kpca_accuracy)
###Output
Classifier accuracy: 0.998
|
next_steps/GraphNN_VAE.ipynb | ###Markdown
MAKE DATASET
###Code
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
from IPython.display import clear_output
from torch_geometric.data import InMemoryDataset, download_url
class DijetAnomaly(InMemoryDataset):
    # Skeleton only: the InMemoryDataset hooks (raw_file_names, processed_file_names,
    # download, process) still need to be implemented before this dataset can be used.
    pass
###Output
_____no_output_____
###Markdown
Make Plotting Scripts, Will later make into a separate module
###Code
def plot_event(pfcands, name):
pt = pfcands[:,0]
eta = pfcands[:,1]
phi = pfcands[:,2]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(eta,phi,s=pt*10000, alpha=0.2)
ax.set_xlabel('eta')
ax.set_ylabel('phi')
ax.set_xlim([-4,4])
ax.set_ylim([-4,4])
fig.savefig(f'figures/{name}.png')
plot_event(data_sig[4],"event_sig_idx4")
###Output
_____no_output_____
###Markdown
A 40-body system with 3 features (pT, eta, phi) for each object (PF candidate)
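For reference, with 40 nodes and no self-loops the fully connected directed graph has $40 \times 39 = 1560$ edges, which is exactly the `n_relations` value computed in the next cell.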
###Code
n_objects = 40 # number of PF candidates(nodes)
object_dim = 3 # features: pT, eta, phi for each PF candidate
n_relations = n_objects * (n_objects - 1) # number of edges in fully connected graph
relation_dim = 1
effect_dim = 10 #effect's vector size
n_relations
data.shape
def get_batch(data, label, idx, batch_size):
start_idx = idx*batch_size
end_idx = idx*batch_size+batch_size
batch_data = data[start_idx: end_idx]
label_data = label[start_idx: end_idx]
objects = batch_data
#receiver_relations, sender_relations - onehot encoding matrices
#each column indicates the receiver and sender object’s index
receiver_relations = np.zeros((batch_size, n_objects, n_relations), dtype=float);
sender_relations = np.zeros((batch_size, n_objects, n_relations), dtype=float);
cnt = 0
for i in range(n_objects):
for j in range(n_objects):
if(i != j):
receiver_relations[:, i, cnt] = 1.0
sender_relations[:, j, cnt] = 1.0
cnt += 1
#There is no relation info in solar system task, just fill with zeros
relation_info = np.zeros((batch_size, n_relations, relation_dim))
target = label_data
objects = Variable(torch.FloatTensor(objects))
sender_relations = Variable(torch.FloatTensor(sender_relations))
receiver_relations = Variable(torch.FloatTensor(receiver_relations))
relation_info = Variable(torch.FloatTensor(relation_info))
target = Variable(torch.FloatTensor(target))
if USE_CUDA:
objects = objects.cuda()
sender_relations = sender_relations.cuda()
receiver_relations = receiver_relations.cuda()
relation_info = relation_info.cuda()
target = target.cuda()
return objects, sender_relations, receiver_relations, relation_info, target
class RelationalModel(nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super(RelationalModel, self).__init__()
self.output_size = output_size
self.layers = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, output_size),
nn.ReLU()
)
def forward(self, x):
'''
Args:
x: [batch_size, n_relations, input_size]
Returns:
[batch_size, n_relations, output_size]
'''
batch_size, n_relations, input_size = x.size()
x = x.view(-1, input_size)
x = self.layers(x)
x = x.view(batch_size, n_relations, self.output_size)
return x
class ObjectModel(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(ObjectModel, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),  # single classification logit per event
        )
    def forward(self, x):
        '''
        Args:
            x: [batch_size, n_objects, object_dim + effect_dim]
        Returns:
            [batch_size, 1] one logit per event
        '''
        input_size = x.size(2)*x.size(1)
        x = x.view(-1, input_size)
        return self.layers(x)
class InteractionNetwork(nn.Module):
def __init__(self, n_objects, object_dim, n_relations, relation_dim, effect_dim):
super(InteractionNetwork, self).__init__()
self.relational_model = RelationalModel(2*object_dim + relation_dim, effect_dim, 150)
self.object_model = ObjectModel((object_dim + effect_dim)*n_objects, 100)
def forward(self, objects, sender_relations, receiver_relations, relation_info):
senders = sender_relations.permute(0, 2, 1).bmm(objects)
receivers = receiver_relations.permute(0, 2, 1).bmm(objects)
effects = self.relational_model(torch.cat([senders, receivers, relation_info], 2))
effect_receivers = receiver_relations.bmm(effects)
predicted = self.object_model(torch.cat([objects, effect_receivers], 2))
return predicted
USE_CUDA = True
interaction_network = InteractionNetwork(n_objects, object_dim, n_relations, relation_dim, effect_dim)
if USE_CUDA:
interaction_network = interaction_network.cuda()
optimizer = optim.Adam(interaction_network.parameters())
criterion = nn.BCEWithLogitsLoss()
n_epoch = 100
batch_size=1000
batches_per_epoch = int(len(data)/batch_size)
losses = []
for epoch in range(n_epoch):
for idx in range(batches_per_epoch):
print((idx/batches_per_epoch)*100,"percent")
objects, sender_relations, receiver_relations, relation_info, target = get_batch(data, label, idx, batch_size)
predicted = interaction_network(objects, sender_relations, receiver_relations, relation_info)
loss = criterion(predicted.flatten(), target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.data.cpu().numpy())
clear_output(True)
plt.figure(figsize=(20,5))
plt.subplot(131)
plt.title('Epoch %s RMS Error %s' % (epoch, np.sqrt(np.mean(losses[-100:]))))
plt.plot(losses)
plt.show()
###Output
0.0 percent
0.09090909090909091 percent
0.18181818181818182 percent
0.27272727272727276 percent
0.36363636363636365 percent
0.45454545454545453 percent
0.5454545454545455 percent
0.6363636363636364 percent
0.7272727272727273 percent
0.8181818181818182 percent
0.9090909090909091 percent
1.0 percent
1.090909090909091 percent
1.1818181818181819 percent
1.2727272727272727 percent
1.3636363636363635 percent
1.4545454545454546 percent
1.5454545454545454 percent
1.6363636363636365 percent
1.7272727272727273 percent
1.8181818181818181 percent
1.9090909090909092 percent
2.0 percent
2.090909090909091 percent
2.181818181818182 percent
2.272727272727273 percent
2.3636363636363638 percent
2.4545454545454546 percent
2.5454545454545454 percent
2.6363636363636362 percent
2.727272727272727 percent
2.8181818181818183 percent
2.909090909090909 percent
3.0 percent
3.090909090909091 percent
3.1818181818181817 percent
3.272727272727273 percent
3.3636363636363638 percent
3.4545454545454546 percent
3.5454545454545454 percent
3.6363636363636362 percent
3.727272727272727 percent
3.8181818181818183 percent
3.909090909090909 percent
4.0 percent
4.090909090909091 percent
4.181818181818182 percent
4.2727272727272725 percent
4.363636363636364 percent
4.454545454545455 percent
4.545454545454546 percent
4.636363636363637 percent
4.7272727272727275 percent
4.818181818181818 percent
4.909090909090909 percent
5.0 percent
5.090909090909091 percent
5.181818181818182 percent
5.2727272727272725 percent
5.363636363636363 percent
5.454545454545454 percent
5.545454545454546 percent
5.636363636363637 percent
5.7272727272727275 percent
5.818181818181818 percent
5.909090909090909 percent
6.0 percent
6.090909090909091 percent
6.181818181818182 percent
6.2727272727272725 percent
6.363636363636363 percent
6.454545454545454 percent
6.545454545454546 percent
6.636363636363636 percent
6.7272727272727275 percent
6.8181818181818175 percent
6.909090909090909 percent
7.000000000000001 percent
7.090909090909091 percent
7.1818181818181825 percent
7.2727272727272725 percent
7.363636363636364 percent
7.454545454545454 percent
7.545454545454546 percent
7.636363636363637 percent
7.727272727272727 percent
7.818181818181818 percent
7.909090909090908 percent
8.0 percent
8.09090909090909 percent
8.181818181818182 percent
8.272727272727273 percent
8.363636363636363 percent
8.454545454545455 percent
8.545454545454545 percent
8.636363636363637 percent
8.727272727272728 percent
8.818181818181818 percent
8.90909090909091 percent
9.0 percent
9.090909090909092 percent
9.181818181818182 percent
9.272727272727273 percent
9.363636363636365 percent
9.454545454545455 percent
9.545454545454547 percent
9.636363636363637 percent
9.727272727272727 percent
9.818181818181818 percent
9.909090909090908 percent
10.0 percent
10.09090909090909 percent
10.181818181818182 percent
10.272727272727272 percent
10.363636363636363 percent
10.454545454545453 percent
10.545454545454545 percent
10.636363636363637 percent
10.727272727272727 percent
10.818181818181818 percent
10.909090909090908 percent
11.0 percent
11.090909090909092 percent
11.181818181818182 percent
11.272727272727273 percent
11.363636363636363 percent
11.454545454545455 percent
11.545454545454545 percent
11.636363636363637 percent
11.727272727272728 percent
11.818181818181818 percent
11.90909090909091 percent
12.0 percent
12.090909090909092 percent
12.181818181818182 percent
12.272727272727273 percent
12.363636363636363 percent
12.454545454545455 percent
12.545454545454545 percent
12.636363636363637 percent
12.727272727272727 percent
12.818181818181817 percent
12.909090909090908 percent
13.0 percent
13.090909090909092 percent
13.18181818181818 percent
13.272727272727272 percent
13.363636363636363 percent
13.454545454545455 percent
13.545454545454547 percent
13.636363636363635 percent
13.727272727272727 percent
13.818181818181818 percent
13.90909090909091 percent
14.000000000000002 percent
14.09090909090909 percent
14.181818181818182 percent
14.272727272727273 percent
14.363636363636365 percent
14.454545454545453 percent
14.545454545454545 percent
14.636363636363637 percent
14.727272727272728 percent
14.81818181818182 percent
14.909090909090908 percent
15.0 percent
15.090909090909092 percent
15.181818181818182 percent
15.272727272727273 percent
15.363636363636363 percent
15.454545454545453 percent
15.545454545454545 percent
15.636363636363637 percent
15.727272727272728 percent
15.818181818181817 percent
15.909090909090908 percent
16.0 percent
16.090909090909093 percent
16.18181818181818 percent
16.272727272727273 percent
16.363636363636363 percent
16.454545454545453 percent
16.545454545454547 percent
16.636363636363637 percent
16.727272727272727 percent
16.818181818181817 percent
16.90909090909091 percent
17.0 percent
17.09090909090909 percent
17.18181818181818 percent
17.272727272727273 percent
17.363636363636363 percent
17.454545454545457 percent
17.545454545454543 percent
17.636363636363637 percent
17.727272727272727 percent
17.81818181818182 percent
17.909090909090907 percent
18.0 percent
18.09090909090909 percent
18.181818181818183 percent
18.272727272727273 percent
18.363636363636363 percent
18.454545454545453 percent
18.545454545454547 percent
18.636363636363637 percent
18.72727272727273 percent
18.818181818181817 percent
18.90909090909091 percent
|
ES_Example.ipynb | ###Markdown
A Comparison of Evolution Strategy and Reinforcement Learning on a Toy Dataset
###Code
# Matplotlib config
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from IPython import display
plt.rcParams['image.interpolation'] = 'nearest'
%matplotlib inline
# Import Dependencies
import os
import sys
import json
import time
import numpy as np
from random import shuffle
# Import our experiment code
from experiment.Simulation import GaussianLandscape
g = GaussianLandscape(n=30)
g.plot_landscape()
# generate a toy 2D regression dataset
sz = 1000
X,Y = np.meshgrid(np.linspace(-1,1,sz),np.linspace(-1,1,sz))
# Generate four Gaussians
mux,muy,sigma=0.3,-0.3,4
G1 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=-0.3,0.3,2
G2 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=0.6,0.6,2
G3 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=-0.4,-0.2,3
G4 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
# Combine Gaussians with addition and subtraction
G = G1 + G2 - G3 - G4
# Plot Dataset
fig,ax = plt.subplots()
im = ax.imshow(G, vmin=-1, vmax=1, cmap='jet')
w = np.array([500.0, 500.0]) # start point
sigma = 3 # spread of the sample cloud around w
noise = np.random.randn(200, 2)
wp = np.expand_dims(w, 0) + sigma*noise
x,y = zip(*wp)
plt.scatter(x,y,4,'k', edgecolors='face')
###Output
_____no_output_____
###Markdown
NeuroEvolution Solution
###Code
np.random.seed(3)
nn = 4 # number of steps to take (and plot horizontally)
alpha = 0.03 # learning rate
sigma = 3 # standard deviation of the samples around current parameter vector
w = np.array([70.0, 60.0]) # start point
plt.figure(figsize=(20,5))
prevx, prevy = [], []
for q in range(nn):
# draw the optimization landscape
ax1 = plt.subplot(1,nn,q+1)
plt.imshow(G, vmin=-1, vmax=1, cmap='jet')
# draw a population of samples in black
noise = np.random.randn(200, 2)
wp = np.expand_dims(w, 0) + sigma*noise
x,y = zip(*wp)
plt.scatter(x,y,4,'k', edgecolors='face')
# draw the current parameter vector in white
plt.scatter([w[0]],[w[1]],40,'w', edgecolors='face')
# draw estimated gradient as white arrow
R = np.array([G[int(wi[1]), int(wi[0])] for wi in wp])
R -= R.mean()
R /= R.std() # standardize the rewards to be N(0,1) gaussian
g = np.dot(R, noise)
u = alpha * g
plt.arrow(w[0], w[1], u[0], u[1], head_width=3, head_length=5, fc='w', ec='w')
plt.axis('off')
    plt.title('iteration %d, reward %.2f' % (q+1, G[int(w[1]), int(w[0])]))
# draw the history of optimization as a white line
prevx.append(w[0])
prevy.append(w[1])
if len(prevx) > 0:
plt.plot(prevx, prevy, 'wo-')
w += u
plt.axis('tight')
#plt.savefig('evo.png',bbox_inches='tight',pad_inches=0,dpi=200)
###Output
_____no_output_____
###Markdown
Making it More Challenging
###Code
# Import our experiment code
from experiment.Simulation import GaussianLandscape
g = GaussianLandscape(n=20)
g.plot_landscape()
# generate a toy 2D regression dataset
sz = 1000
X,Y = np.meshgrid(np.linspace(-1,1,sz),np.linspace(-1,1,sz))
# Generate four Gaussians
mux,muy,sigma=0.3,-0.3,4
G1 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=-0.3,0.3,2
G2 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=0.6,0.6,2
G3 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=-0.4,-0.2,3
G4 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=-0.4,-0.1,7
G5 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
mux,muy,sigma=-0,-0,2
G6 = np.exp(-((X-mux)**2+(Y-muy)**2)/2.0*sigma**2)
# Combine Gaussians with addition and subtraction
G = G1 + G2 - G3 - G4 + G5 - G6
# Plot Dataset
fig,ax = plt.subplots()
im = ax.imshow(G, vmin=-1, vmax=1, cmap='jet')
from math import ceil
np.random.seed(3)
nn = 5 # number of steps to take (and plot horizontally)
alpha = 0.9 # learning rate
sigma = 50 # standard deviation of the samples around the current parameter vector
pop_size = 500
mm = 5
plt.figure(figsize=(20,5*mm))
for z in range(mm):
gl = GaussianLandscape(n=30)
G = gl.combine_gaussians()
w = np.array([500.0, 500.0]) # start point
prevx, prevy = [], []
for q in range(nn):
# draw the optimization landscape
m = ceil(nn*mm/5)
ax1 = plt.subplot(m,5,q+1 + (z*5))
plt.imshow(G, vmin=-1, vmax=1, cmap='jet')
# draw a population of samples in black
noise = np.random.randn(200, 2)
wp = np.expand_dims(w, 0) + sigma*noise
x,y = zip(*wp)
plt.scatter(x,y,4,'k', edgecolors='face')
# draw the current parameter vector in white
plt.scatter([w[0]],[w[1]],40,'w', edgecolors='face')
# draw estimated gradient as white arrow
R = np.array([G[int(wi[1]), int(wi[0])] for wi in wp])
R -= R.mean()
R /= R.std() # standardize the rewards to be N(0,1) gaussian
g = np.dot(R, noise)
u = alpha * g
plt.arrow(w[0], w[1], u[0], u[1], head_width=3, head_length=5, fc='w', ec='w')
plt.axis('off')
        plt.title('iteration %d, reward %.2f, position (%d,%d)' % (q+1, G[int(w[1]), int(w[0])], w[0],w[1]))
# draw the history of optimization as a white line
prevx.append(w[0])
prevy.append(w[1])
if len(prevx) > 0:
plt.plot(prevx, prevy, 'wo-')
w += u
plt.axis('tight')
#plt.savefig('evo.png',bbox_inches='tight',pad_inches=0,dpi=200)
###Output
_____no_output_____ |
notebooks/human_prod_line.ipynb | ###Markdown
PC assembly production line simulation (human edition) Initialization Import the required libraries.
###Code
%matplotlib inline
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
from modsim import *
import random
import statistics
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
np.random.seed(7)
###Output
_____no_output_____
###Markdown
Initialize the state object
###Code
pcs = State(left = 0)
time = State(elapsed = 0)
###Output
_____no_output_____
###Markdown
Initialize the TimeSeries objects
###Code
assembled_pcs = TimeSeries()
###Output
_____no_output_____
###Markdown
Declaration Declare the `random events` object
###Code
# Key: event name, value: probability
events = {
'ausentismo': 0.05,
'accidente': 0.05,
'error': 0.15
}
###Output
_____no_output_____
###Markdown
Declare the `randomness` function:
###Code
# When called, returns an array of events happened
def randomness():
events_happened = []
for key in events:
if flip(events[key]):
events_happened.append(key)
if key == 'ausentismo':
return events_happened
return events_happened
###Output
_____no_output_____
###Markdown
Declare the `assemble_pcs` function.
###Code
def assemble_pcs(employees, productivity):
total_time = 0
for i in range(employees):
random_events_index = 1
# Implementation of random events
events_happened = randomness()
# Checks events happened
if 'accidente' in events_happened:
random_events_index += 0.25
if 'error' in events_happened:
random_events_index += 0.50
if 'ausentismo' in events_happened:
random_events_index = 0
build_time = random.randint(1800, 2900)
productivity_index = (1 - productivity) + 1
total_time += build_time * productivity_index * random_events_index
average_time = total_time / employees
return average_time
###Output
_____no_output_____
###Markdown
Declare the `get_productivity` function.
###Code
def get_productivity(time_elapsed):
    if (time_elapsed > 10800):
        if (time_elapsed < 14400):
            return 0.8
        if (time_elapsed > 21600):
            return 0.5
    return 1.0
###Output
_____no_output_____
###Markdown
Declare the `draw` function.
###Code
def draw():
plot(assembled_pcs, color='blue', label='Assembled pcs')
decorate(title='Assembled pcs by persons per day',
xlabel='Time step (work days)',
ylabel='Number of pcs')
savefig('figs/chap02-fig01.pdf')
###Output
_____no_output_____
###Markdown
Declare the `run_simulation` function.
###Code
def run_simulation(employees, pc_quantity):
pcs.left = pc_quantity
time_limit = 7 * 60 * 60 # work day time in seconds
day = 1
while True:
productivity = get_productivity(time.elapsed)
assemble_time = assemble_pcs(employees, productivity)
time.elapsed += assemble_time
pcs.left -= employees
if (time.elapsed >= time_limit):
assembled_pcs[day] = pc_quantity - pcs.left
time.elapsed = 0
print("Day " + str(day) + ": " + str(pc_quantity - pcs.left))
day += 1
if (pcs.left <= 0):
assembled_pcs[day] = pc_quantity
print("Finished " + str(pc_quantity) + " pcs in " + str(day) + " days!")
break
draw()
###Output
_____no_output_____
###Markdown
Run simulation - Employees: 20 - PC quantity: 1500
###Code
run_simulation(20, 1500)
###Output
Day 1: 200
Day 2: 400
Day 3: 600
Day 4: 800
Day 5: 1000
Day 6: 1200
Day 7: 1400
Finished 1500 pcs in 8 days!
Saving figure to file figs/chap02-fig01.pdf
###Markdown
PC assembly production line simulation (machine edition) Initialization Import the required libraries.
###Code
%matplotlib inline
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
from modsim import *
import random
import statistics
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
np.random.seed(7)
###Output
_____no_output_____
###Markdown
Initialize the state object
###Code
pcs = State(left = 0)
time = State(elapsed = 0)
###Output
_____no_output_____
###Markdown
Initialize the TimeSeries objects
###Code
assembled_pcs = TimeSeries()
###Output
_____no_output_____
###Markdown
Declaration Declare the `assemble_pcs` function.
###Code
def assemble_pcs():
    machine_parallel_processes = 18
    build_time = random.randint(669, 934)  # per-unit build time in seconds
    total_time = build_time / machine_parallel_processes
    return total_time
###Output
_____no_output_____
###Markdown
Declare the `draw` function.
###Code
def draw():
plot(assembled_pcs, color='green', label='Assembled pcs')
    decorate(title='Assembled pcs by machine per day',
xlabel='Time step (work days)',
ylabel='Number of pcs')
savefig('figs/chap02-fig01.pdf')
###Output
_____no_output_____
###Markdown
Declare the `run_simulation` function.
###Code
def run_simulation(pc_quantity):
pcs.left = pc_quantity
time_limit = 8 * 60 * 60 # work day time in seconds
day = 1
while True:
assemble_time = assemble_pcs()
time.elapsed += assemble_time
pcs.left -= 1
if (time.elapsed >= time_limit):
assembled_pcs[day] = pc_quantity - pcs.left
time.elapsed = 0
print("Day " + str(day) + ": " + str(pc_quantity - pcs.left))
day += 1
if (pcs.left <= 0):
assembled_pcs[day] = pc_quantity
print("Finished " + str(pc_quantity) + " pcs in " + str(day) + " days!")
break
draw()
###Output
_____no_output_____
###Markdown
Run simulation - PC quantity: 1500
###Code
run_simulation(1500)
###Output
Day 1: 655
Day 2: 1311
Finished 1500 pcs in 3 days!
Saving figure to file figs/chap02-fig01.pdf
|
Seminar/bayesian_optimization.ipynb | ###Markdown
Bayesian optimization with `skopt`Gilles Louppe, Manoj Kumar July 2016.
###Code
!pip install scikit-optimize
import numpy as np
np.random.seed(123)
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Problem statementWe are interested in solving $$x^* = \arg \min_x f(x)$$ under the constraints that- $f$ is a black box for which no closed form is known (nor its gradients);- $f$ is expensive to evaluate;- and evaluations of $y = f(x)$ may be noisy.**Disclaimer.** If you do not have these constraints, then there is certainly a better optimization algorithm than Bayesian optimization. Bayesian optimization loopFor $t=1:T$:1. Given observations $(x_i, y_i=f(x_i))$ for $i=1:t$, build a probabilistic model for the objective $f$. Integrate out all possible true functions, using Gaussian process regression. 2. Optimize a cheap acquisition/utility function $u$ based on the posterior distribution for sampling the next point. $$x_{t+1} = \arg \min_x u(x)$$ Exploit uncertainty to balance exploration against exploitation. 3. Sample the next observation $y_{t+1}$ at $x_{t+1}$. Acquisition functionsAcquisition functions $\text{u}(x)$ specify which sample $x$ should be tried next:- Expected improvement (default): $-\text{EI}(x) = -\mathbb{E} [f(x) - f(x_t^+)] $;- Lower confidence bound: $\text{LCB}(x) = \mu_{GP}(x) + \kappa \sigma_{GP}(x)$;- Probability of improvement: $-\text{PI}(x) = -P(f(x) \geq f(x_t^+) + \kappa) $;where $x_t^+$ is the best point observed so far.In most cases, acquisition functions provide knobs (e.g., $\kappa$) for controlling the exploration-exploitation trade-off.- Search in regions where $\mu_{GP}(x)$ is high (exploitation)- Probe regions where uncertainty $\sigma_{GP}(x)$ is high (exploration) Toy exampleLet us assume the following noisy function $f$:
###Code
noise_level = 0.1
def f(x, noise_level=noise_level):
return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) + np.random.randn() * noise_level
###Output
_____no_output_____
###Markdown
**Note.** In `skopt`, functions $f$ are assumed to take as input a 1D vector $x$ represented as an array-like and to return a scalar $f(x)$.
###Code
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
[fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
alpha=.2, fc="r", ec="None")
plt.legend()
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Bayesian optimization based on Gaussian process regression is implemented in `skopt.gp_minimize` and can be carried out as follows:
###Code
from skopt import gp_minimize
res = gp_minimize(f, # the function to minimize
[(-2.0, 2.0)], # the bounds on each dimension of x
acq_func="EI", # the acquisition function
n_calls=15, # the number of evaluations of f
n_random_starts=5, # the number of random initialization points
noise=0.1**2, # the noise level (optional)
random_state=123) # the random seed
###Output
_____no_output_____
###Markdown
Accordingly, the approximated minimum is found to be:
###Code
"x^*=%.4f, f(x^*)=%.4f" % (res.x[0], res.fun)
###Output
_____no_output_____
###Markdown
For further inspection of the results, attributes of the `res` named tuple provide the following information:- `x` [float]: location of the minimum.- `fun` [float]: function value at the minimum.- `models`: surrogate models used for each iteration.- `x_iters` [array]: location of function evaluation for each iteration.- `func_vals` [array]: function value for each iteration.- `space` [Space]: the optimization space.- `specs` [dict]: parameters passed to the function.
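For instance, a few of these fields can be read off directly (a small illustration using the `res` object obtained above; the complete object is printed in the next cell):

```python
# Quick look at a few fields of the skopt result object defined above.
print("minimum location :", res.x)
print("minimum value    :", res.fun)
print("evaluations made :", len(res.func_vals))
```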
###Code
print(res)
###Output
fun: -1.071498592229307
func_vals: array([-0.18788762, -0.96229886, -0.34643484, -0.46587165, -0.1773319 ,
-0.80809029, -1.07149859, -0.92120939, -0.1608395 , -0.80998886,
-0.32494081, 0.0329113 , -0.02279617, 0.05849308, 0.69296704])
models: [GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5) + WhiteKernel(noise_level=0.01),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734)]
random_state: <mtrand.RandomState object at 0x7f2e17e3a288>
space: Space([Real(low=-2.0, high=2.0, prior='uniform', transform='normalize')])
specs: {'args': {'n_jobs': 1, 'kappa': 1.96, 'xi': 0.01, 'n_restarts_optimizer': 5, 'n_points': 10000, 'callback': None, 'verbose': False, 'random_state': <mtrand.RandomState object at 0x7f2e17e3a288>, 'y0': None, 'x0': None, 'acq_optimizer': 'auto', 'acq_func': 'EI', 'n_random_starts': 5, 'n_calls': 15, 'base_estimator': GaussianProcessRegressor(alpha=1e-10, copy_X_train=True,
kernel=1**2 * Matern(length_scale=1, nu=2.5),
n_restarts_optimizer=2, noise=0.010000000000000002,
normalize_y=True, optimizer='fmin_l_bfgs_b',
random_state=843828734), 'dimensions': Space([Real(low=-2.0, high=2.0, prior='uniform', transform='normalize')]), 'func': <function f at 0x7f2e1d8b8158>}, 'function': 'base_minimize'}
x: [-0.25728223389301497]
x_iters: [[0.8518212820929092], [-0.2861162952526968], [0.7635394201074472], [0.8766012406190926], [-0.03552426626961047], [-0.30626086081540116], [-0.25728223389301497], [-0.22378966711457093], [1.0553495077507762], [-0.25562401712808525], [-0.5244265431906165], [-1.9992867763849413], [1.999866281741757], [-1.2339044799440428], [0.4596022277478369]]
###Markdown
Together these attributes can be used to visually inspect the results of the minimization, such as the convergence trace or the acquisition function at the last iteration:
###Code
from skopt.plots import plot_convergence
plot_convergence(res);
###Output
_____no_output_____
###Markdown
Let us now visually examine1. The approximation of the fitted GP model to the original function.2. The acquisition values that determine the next point to be queried.
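The EI curves plotted below are computed with skopt's `gaussian_ei` helper. For reference, a minimal sketch of the closed-form expected improvement for a *minimization* problem under a Gaussian posterior (equivalent to `gaussian_ei` with its exploration offset `xi` set to 0; `mu`, `sigma` and `y_best` are assumed to come from a fitted GP and the observations):

```python
# Minimal closed-form EI sketch for minimization:
#   EI(x) = (y_best - mu) * Phi(z) + sigma * phi(z),  with z = (y_best - mu) / sigma
# mu/sigma: GP posterior mean/std at candidate points, y_best: lowest observed value.
import numpy as np
from scipy.stats import norm

def expected_improvement(mu, sigma, y_best):
    sigma = np.maximum(sigma, 1e-12)   # guard against zero predictive std
    z = (y_best - mu) / sigma
    return (y_best - mu) * norm.cdf(z) + sigma * norm.pdf(z)
```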
###Code
from skopt.acquisition import gaussian_ei
plt.rcParams["figure.figsize"] = (12, 21)
x = np.linspace(-2, 2, 400).reshape(-1, 1)
x_gp = res.space.transform(x.tolist())
fx = np.array([f(x_i, noise_level=0.0) for x_i in x])
# Plot the 5 iterations following the 5 random points
for n_iter in range(5):
gp = res.models[n_iter]
curr_x_iters = res.x_iters[:5+n_iter]
curr_func_vals = res.func_vals[:5+n_iter]
# Plot true function.
plt.subplot(5, 2, 2*n_iter+1)
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([fx - 1.9600 * noise_level,
fx[::-1] + 1.9600 * noise_level]),
alpha=.2, fc="r", ec="None")
# Plot GP(x) + contours
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(curr_x_iters, curr_func_vals,
"r.", markersize=8, label="Observations")
# Adjust plot layout
plt.grid()
if n_iter == 0:
plt.legend(loc="best", prop={'size': 6}, numpoints=1)
if n_iter != 4:
plt.tick_params(axis='x', which='both', bottom=False,
top=False, labelbottom=False)
# Plot EI(x)
plt.subplot(5, 2, 2*n_iter+2)
acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
plt.plot(x, acq, "b", label="EI(x)")
plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')
next_x = res.x_iters[5+n_iter]
next_acq = gaussian_ei(res.space.transform([next_x]), gp, y_opt=np.min(curr_func_vals))
plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")
# Adjust plot layout
plt.ylim(0, 0.1)
plt.grid()
if n_iter == 0:
plt.legend(loc="best", prop={'size': 6}, numpoints=1)
if n_iter != 4:
plt.tick_params(axis='x', which='both', bottom=False,
top=False, labelbottom=False)
plt.show()
###Output
_____no_output_____
###Markdown
The first column shows the following:1. The true function.2. The approximation to the original function by the Gaussian process model.3. How sure the GP is about the function.The second column shows the acquisition function values after every surrogate model is fit. It is possible that we do not choose the global minimum but a local minimum, depending on the minimizer used to minimize the acquisition function.At points close to previously evaluated points, the variance dips to zero. Finally, as we increase the number of points, the GP model approaches the actual function. The final few points are clustered around the minimum because the GP does not gain anything more by further exploration:
###Code
plt.rcParams["figure.figsize"] = (6, 4)
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
x_gp = res.space.transform(x.tolist())
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
[fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
alpha=.2, fc="r", ec="None")
# Plot GP(x) + contours
gp = res.models[-1]
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(res.x_iters,
res.func_vals,
"r.", markersize=15, label="Observations")
plt.title(r"$x^* = %.4f, f(x^*) = %.4f$" % (res.x[0], res.fun))
plt.legend(loc="best", prop={'size': 8}, numpoints=1)
plt.grid()
plt.show()
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
###Output
_____no_output_____ |
supplementary-material/Final-Projects/2019/LA-Airbnb-Price-Prediction.ipynb | ###Markdown
CONCLUSION: Most Airbnbs with a rating greater than 90 have been given a 100 rating.
###Code
#STAGE 2 - BASIC ANALYSIS
#Some analysis on importance of basic Airbnb characteristics
#How many owners who were rated above 90 had a strict, flexible or moderate cancellation policy?
print('Flexible: ' + str(round((LA_90s[LA_90s['cancellation_policy'] == 'flexible'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
print('Moderate: ' + str(round((LA_90s[LA_90s['cancellation_policy'] == 'moderate'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
print('Strict: ' + str(round((LA_90s[LA_90s['cancellation_policy'] == 'strict'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
#Plot for better understanding
ax = sns.catplot(y="cancellation_policy", kind="count", data=LA_90s, height=2.6, aspect=2.5)
###Output
_____no_output_____
###Markdown
CONCLUSION - Surprisingly, owners rated greater than 90 mostly have a 'strict' cancellation policy although the assumption is that customers do not like strict policies. But, we can also see that none of them have a 'Super strict' policy.
###Code
#How many hosts are superhosts?
print('Superhost: ' + str(round((LA_90s[LA_90s['host_is_superhost'] == 't'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
print('Not a superhost: ' + str(round((LA_90s[LA_90s['host_is_superhost'] == 'f'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
#Plot for better understanding
ax = sns.catplot(y="host_is_superhost", kind="count", data=LA_90s, height=2.6, aspect=2.5)
###Output
_____no_output_____
###Markdown
CONCLUSION - Again, surprisingly enough, apparently it doesn't take a host to be a superhost to get a rating above 90.
###Code
#How important is being instantly bookable for a rating greater than 90?
print('Instantly bookable: ' + str(round((LA_90s[LA_90s['instant_bookable'] == 't'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
print('Not instantly bookable: ' + str(round((LA_90s[LA_90s['instant_bookable'] == 'f'].shape[0] / LA_90s.shape[0] * 100),2)) + '%')
#Plot for better understanding
ax = sns.catplot(y="instant_bookable", kind="count", data=LA_90s, height=2.6, aspect=2.5)
###Output
_____no_output_____
###Markdown
CONCLUSION - Clearly, it is not necessary for an Airbnb to be instantly bookable to have a great rating. All our initial assumptions have been proven wrong.
###Code
#STAGE 3 - DEEPER ANALYSIS
#So what affects a good rating? How do we know what a customer wants?
#Let us try to convert "Price" from string to float
#Remove the dollar sign
LA_90s['price'] = LA_90s['price'].str[1:]
#Replace all commas with "" and convert to float
LA_90s['price'] = LA_90s['price'].str.replace(",","")
LA_90s['price'] = LA_90s['price'].astype(float)
#We now see that "Price" is a float value
LA_90s['price']
#Understand the effect of price and number of bedrooms/bathrooms on rating
g = sns.PairGrid(LA_90s, y_vars=["price"], x_vars=["bathrooms", "bedrooms"], height=4.5, hue="review_scores_rating", aspect=1.1)
ax = g.map(plt.scatter, alpha=0.3)
###Output
_____no_output_____
###Markdown
CONCLUSION: No significant correlation between price and the number of bedrooms, except that a place with many bedrooms is, unsurprisingly, priced higher.
###Code
#To understand if increase in price increases rating
sns.lmplot(x="price", y="review_scores_rating", data=LA_90s);
###Output
_____no_output_____
###Markdown
CONCLUSION: The price of an Airbnb and its rating do not seem correlated until the price is much higher. This would imply that a pricier place is more likely to offer amenities and services that earn a better rating. REGRESSION
###Code
#STAGE 4- REGRESSION
#Preprocessing - dropping values - Let us create a new dataset with values that would matter to the regression.
LA_90_Reg = LA_90s.drop(columns=['street','city','host_neighbourhood', 'security_deposit', 'cleaning_fee', 'property_type', 'bed_type', 'room_type', 'host_total_listings_count', 'host_response_time', 'host_response_rate', 'state', 'zipcode', 'market', 'smart_location', 'accommodates', 'beds', 'amenities', 'guests_included', 'extra_people', 'minimum_nights', 'maximum_nights', 'first_review', 'last_review', 'instant_bookable', 'reviews_per_month'])
LA_90_Reg.info()
#Preprocessing - categorical values
LA_90_Reg_dum = pd.get_dummies(LA_90_Reg, columns=['host_is_superhost', 'host_identity_verified', 'cancellation_policy'])
LA_90_Reg_dum.head()
LA_90_Reg_dum.columns
LA_90_Reg_dum.drop(['host_is_superhost_f', 'host_identity_verified_f', 'cancellation_policy_super_strict_60'], axis=1, inplace=True)
LA_90_Reg_dum.info()
#Replace all NaN values (for floats) with zero
LA_90_Reg_dum['bathrooms'].fillna(0, inplace=True)
LA_90_Reg_dum['bedrooms'].fillna(0, inplace=True)
LA_90_Reg_dum['price'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_rating'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_accuracy'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_cleanliness'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_checkin'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_communication'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_location'].fillna(0, inplace=True)
LA_90_Reg_dum['review_scores_value'].fillna(0, inplace=True)
#Our target variable is "review_scores_rating", and so we are dropping it in X
X = LA_90_Reg_dum.drop('review_scores_rating', axis=1)
X.shape
#Target variable Y contains "review_scores_rating"
y = LA_90s['review_scores_rating']
y.shape
#Split train and test
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=833)
#Import linear regression model
from sklearn.linear_model import LinearRegression
model = LinearRegression()
#Fit the model with train data
model.fit(Xtrain, ytrain)
#Print model coeffs
print("Model coefficients: ", model.coef_)
print("Model intercept:", model.intercept_)
y_model = model.predict(Xtest)
test = Xtest.join(ytest).reset_index()
test.join(pd.Series(y_model, name='predicted')).head()
from sklearn.metrics import mean_absolute_error
mean_absolute_error(ytest, y_model)
###Output
_____no_output_____ |
aodqc_example.ipynb | ###Markdown
Code example accompanying "A cloud screening algorithm for ground-based aerosol optical depth measurements using all-sky images and deep transfer learning."
###Code
import cv2
import glob
import matplotlib.pyplot as plt
import natsort
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16 as PretrainedModel, preprocess_input
from tensorflow.keras.models import Model, load_model
###Output
_____no_output_____
###Markdown
Clone the project repository to get access to the raw images.
###Code
# Comment out if you are not using method 1 (Colab)
!git clone https://github.com/eawendt/aodqc.git
###Output
fatal: destination path 'aodqc' already exists and is not an empty directory.
###Markdown
Pre-processing functions The following functions are used to crop an all-sky image to the region around the solar disk. Functions with a leading _ are intermediate functions. To pre-process an all-sky image, call the ```crop_image``` function with your image as the argument. Before passing the cropped image into the classifier model, be sure to pass it through the ```prep_vgg``` function, which scales the channels to the values that VGG expects.
###Code
# Data import
def _standardize_image(image, edge_len = 840):
'''
Standardizes input images to a default size of 840x840 pixels
Parameters:
image: An all-sky image
edge_len: The desired edge length
Returns:
scaled: A scaled image
'''
scale = edge_len / image.shape[0]
scaled_image = cv2.resize(image, None, fx = scale, fy = scale, interpolation = cv2.INTER_AREA)
col1 = int((scaled_image.shape[1] - scaled_image.shape[0]) / 2)
col2 = int(scaled_image.shape[1] - col1)
row1 = int(0)
row2 = int(scaled_image.shape[1])
scaled = scaled_image[row1:row2, col1:col2]
return(scaled)
# Processing
def _threshold_image(image):
'''
Applies a multistage thresholding procedure to an all-sky image
Parameters:
image: An all-sky image
Returns:
seg: An all-sky image with the sunlit pixels isolated
'''
b = image[:, :, 0] # Get only the blue channel
th, seg_tmp = cv2.threshold(b, 252, 255, cv2.THRESH_BINARY)
seg_tmp = cv2.GaussianBlur(src = seg_tmp , ksize = (15, 15), sigmaX = 0)
seg_tmp = cv2.bilateralFilter(src = seg_tmp , d = 15, sigmaColor = 25, sigmaSpace = 25)
th, seg = cv2.threshold(seg_tmp, 50, 255, cv2.THRESH_BINARY)
return seg
def _calc_sun_contour(seg):
'''
Finds the contour most likely to be the solar disk.
Parameters:
seg: A previously thresholded all-sky image.
Returns: The contour of the solar disk.
'''
contours, hierarchy = cv2.findContours(image = seg, mode = cv2.RETR_EXTERNAL, method = cv2.CHAIN_APPROX_SIMPLE)
max_circ = 0
sun = None
for cn in contours:
area = cv2.contourArea(cn)
if (area > 2750):
arc_length = cv2.arcLength(cn, True)
circularity = 4 * np.pi * area / (arc_length * arc_length)
if circularity > max_circ:
max_circ = circularity
sun = cn
return sun
def _calc_sun_center(contour):
'''
Finds the center of a contour, in this case, the contour of the solar disk.
Parameters:
contour: A contour returned by the _calc_sun_contour() function
Returns:
cx: The row of the image containing the center of the solar disk
cy: The column of the image containing the center of the solar disk
'''
M = cv2.moments(contour)
cx = int(M["m01"] / M["m00"])
cy = int(M["m10"] / M["m00"])
return cx, cy
def _crop_to_sun(image, cx, cy, offset = 112):
'''
Crops the image around the solar disk. If no solar disk is found, then it
crops the image in the center of the image.
Parameters:
image: A scaled all-sky image.
cx: The row of the image containing the center of the solar disk
cy: The column of the image containing the center of the solar disk
offset: Desired offset from center of the solar disk. Default 112
for shape of 224x224.
Returns:
cropped: Cropped all-sky image
'''
xmax = image.shape[0]
ymax = image.shape[1]
# Find how much space there is between sun center and edge of image
spc_xl = cx
spc_xu = xmax - cx
spc_yl = cy
spc_yu = ymax - cy
if (cx <= offset):
xl = cx
else:
xl = offset
if ((xmax - cx) <= offset):
xu = xmax - cx
else:
xu = offset
if (cy <= offset):
        yl = cy
else:
yl = offset
if ((ymax - cy) <= offset):
yu = ymax - cy
else:
yu = offset
cropped = image[(cx - xl):(cx + xu), (cy - yl):(cy + yu)]
return cropped
def crop_image(image, offset = 112):
'''
Returns a cropped all-sky image
Parameters:
image: An unprocessed all-sky image
offset: Desired offset from center of the solar disk. Default 112
for shape of 224x224.
Returns:
cropped: A cropped all-sky image
'''
scaled = _standardize_image(image = image)
seg = _threshold_image(image = scaled)
sun = _calc_sun_contour(seg = seg)
if sun is not None:
# If the sun is found, calculate the center of the sun
cx, cy = _calc_sun_center(contour = sun)
else:
# If the sun is not found, set center equal to the center of the scaled image
cx, cy = int(scaled.shape[0] / 2), int(scaled.shape[1] / 2)
cropped = _crop_to_sun(image = scaled, cx = cx, cy = cy, offset = offset)
return cropped
def prep_vgg(image):
'''
Returns an image prepared to interface with vgg network
Parameters:
images: Cropped all-sky image
Returns:
im_vgg: image prepared for input into vgg
'''
im_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # cv2 gives bgr by default while vgg expects rgb
im_vgg = preprocess_input(im_rgb)
return im_vgg
###Output
_____no_output_____
###Markdown
Testing pre-process function on a random image Function to show the image using matplotlib. Note that opencv using BGR color channels so we need to convert to RGB before displaying with matplotlib.
###Code
def show_image(img):
img_rgb = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2RGB)
fig = plt.figure(figsize=[6, 6]);
plt.axis("off");
plt.imshow(img_rgb);
###Output
_____no_output_____
###Markdown
Get a sorted list of raw image files.
###Code
raw_image_files = natsort.natsorted(glob.glob('aodqc/images/*/*/*.jpg'))
rand_ind = random.randint(0, len(raw_image_files))
raw_image = cv2.imread(raw_image_files[rand_ind]) # Change this to look at a specific image
show_image(raw_image)
cropped = crop_image(raw_image)
show_image(cropped)
###Output
_____no_output_____
###Markdown
Run the classifier on the random image Load in a model using the tensorflow load_model function
###Code
model_7 = load_model('/content/aodqc/models/model_7.h5')
###Output
_____no_output_____
###Markdown
Scale the data to be interfaced with VGG
###Code
im_vgg = prep_vgg(cropped)
###Output
_____no_output_____
###Markdown
Define class labels: * 0 = cirrus* 1 = clear* 2 = cloud
###Code
classes = ['Cirrus', 'Clear', 'Cloud']
###Output
_____no_output_____
###Markdown
Run the model to predict the output. Note that the tensorflow predict function expects a 4D tensor, which is why we add a leading batch axis using ```tf.newaxis```.
###Code
pred_index = model_7.predict(im_vgg[tf.newaxis,:,:,:]).argmax()
pred_class = classes[pred_index]
###Output
WARNING:tensorflow:6 out of the last 15 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7fa11b4c8320> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
###Markdown
Plot the image along with its predicted class.
###Code
fig = plt.figure(figsize=(6,6));
show_image(raw_image);
plt.title(f'Predicted: {pred_class}', fontsize=24);
plt.axis('off');
###Output
_____no_output_____ |
2020_08_03/简单线性回归.ipynb | ###Markdown
Generate the test dataset
###Code
# Define the dataset
X = np.arange(0,10,0.1, dtype=np.float32)
Y = 2*X + 2*np.random.random(100)
# Visualize the dataset
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(1,1,1)
ax.scatter(X,Y)
fig.show()
###Output
_____no_output_____
###Markdown
Define our model Here we use a PyTorch model (an `nn.Module`).
###Code
class LinearRegression(nn.Module):
def __init__(self):
super(LinearRegression, self).__init__()
self.linear1 = nn.Linear(1, 1)
def forward(self, x):
x = self.linear1(x)
return x
# Initialize the model
linearModel = LinearRegression()
###Output
_____no_output_____
###Markdown
Define the loss function
###Code
loss = torch.nn.MSELoss() # define the mean squared error loss
# A small example of the loss function
x_sample = torch.tensor([1,0], dtype=torch.float32)
y_sample = torch.tensor([1,1], dtype=torch.float32)
l = loss(x_sample, y_sample)
print(l)
###Output
tensor(0.5000)
###Markdown
Define the optimizer Here we define an SGD optimizer, ```python optimizer = torch.optim.SGD([w], lr=learning_rate)``` where: - w is the parameter we want to update (the network weights) - lr is the learning rate. In PyTorch, the optimizer also provides two conveniences: updating all parameters in one call and zeroing all parameter gradients. - `optimizer.step()`: updates the relevant variables of the neural network (a composite function), i.e. every parameter takes one step in the direction opposite to its gradient. - `optimizer.zero_grad()`: clears the gradients of the network's parameters.
###Code
# Define an SGD optimizer
learning_rate = 0.001
optimizer = torch.optim.SGD(linearModel.parameters(), lr=learning_rate)
###Output
_____no_output_____
###Markdown
Update the weights
###Code
X_tensor = torch.from_numpy(X).view(100,1)
Y_tensor = torch.from_numpy(Y).view(100,1)
n_iters = 101
for epoch in range(n_iters):
y_pred = linearModel(X_tensor)
    l = loss(Y_tensor, y_pred) # compute the loss (note the argument order)
    l.backward() # compute the gradients
    optimizer.step() # update the weights, i.e. take one optimization step
    optimizer.zero_grad() # clear the gradients
    [w, b] = linearModel.parameters() # get the parameters
if epoch % 20 == 0:
print(f'epoch {epoch+1}: w = {w.data}, loss = {l.item():.3f}')
print(f'Prediction from the trained model: when x = 5, y = {linearModel(torch.tensor([5.0]))}')
# Plot the fitted line
y_pre = linearModel(X_tensor).detach().numpy()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(1,1,1)
ax.scatter(X,Y)
ax.plot(X, y_pre, 'g-', lw=3)
fig.show()
###Output
_____no_output_____ |
breast-cancer-detection.ipynb | ###Markdown
###Code
!pip install tensorflow
!pip install tensorflow-gpu
import pandas as pd
import numpy as np
import os
import keras
import matplotlib.pyplot as plt
from keras.layers import Dense,GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from google.colab import files
base_model=MobileNet(weights='imagenet',include_top=False)
x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
x=Dense(1024,activation='relu')(x) #dense layer 2
x=Dense(512,activation='relu')(x) #dense layer 3
preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
model=Model(inputs=base_model.input,outputs=preds)
for layer in model.layers[:20]:
layer.trainable=False
for layer in model.layers[20:]:
layer.trainable=True
!pip install -U -q kaggle
!mkdir /root/.kaggle
files.upload()
!apt install pv
!unzip -o /content/chest-xray-pneumonia.zip | pv -l >/dev/null
os.remove('chest-xray-pneumonia.zip')
!unzip -o /content/chest_xray.zip | pv -l >/dev/null
os.remove('chest_xray.zip')
base_model=MobileNet(weights='imagenet',include_top=False)
train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input) #included in our dependencies
train_generator=train_datagen.flow_from_directory('/content/chest_xray/train/',
target_size=(224,224),
color_mode='rgb',
batch_size=32,
class_mode='categorical', shuffle=True)
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Adam optimizer
# loss function will be categorical cross entropy
# evaluation metric will be accuracy
step_size_train=train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
steps_per_epoch=step_size_train,
epochs=5)
!cp "kaggle (7).json" ~/.kaggle/kaggle.json
!kaggle datasets download -d paultimothymooney/chest-xray-pneumonia
!apt install pv
!unzip -o /content/chest-xray-pneumonia.zip | pv -l >/dev/null
os.remove('chest-xray-pneumonia.zip')
!unzip -o /content/chest_xray.zip | pv -l >/dev/null
os.remove('chest_xray.zip')
train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input) #included in our dependencies
train_generator=train_datagen.flow_from_directory('/content/chest_xray/train/',
target_size=(224,224),
color_mode='rgb',
batch_size=32,
class_mode='categorical', shuffle=True)
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Adam optimizer
# loss function will be categorical cross entropy
# evaluation metric will be accuracy
step_size_train=train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
steps_per_epoch=step_size_train,
epochs=5)
###Output
_____no_output_____ |
notebooks/misc/osmnx_streets/official_examples/10-building-footprints.ipynb | ###Markdown
Building footprint (plus street network) figure-ground diagramsAuthor: [Geoff Boeing](https://geoffboeing.com/)Use OSMnx to download OpenStreetMap building footprints and visualize them as figure-ground diagrams. - [Overview of OSMnx](http://geoffboeing.com/2016/11/osmnx-python-street-networks/) - [GitHub repo](https://github.com/gboeing/osmnx) - [Examples, demos, tutorials](https://github.com/gboeing/osmnx-examples) - [Documentation](https://osmnx.readthedocs.io/en/stable/) - [Journal article/citation](http://geoffboeing.com/publications/osmnx-complex-street-networks/)
###Code
import osmnx as ox
from IPython.display import Image
%matplotlib inline
ox.__version__
# configure the inline image display
img_folder = "images"
extension = "png"
size = 240
# specify that we're retrieving building footprint geometries
tags = {"building": True}
###Output
_____no_output_____
###Markdown
Building footprints within the city limits of Piedmont, California
###Code
gdf = ox.geometries_from_place("Piedmont, California, USA", tags)
gdf_proj = ox.project_gdf(gdf)
fp = f"./{img_folder}/piedmont_bldgs.{extension}"
fig, ax = ox.plot_footprints(gdf_proj, filepath=fp, dpi=400, save=True, show=False, close=True)
Image(fp, height=size, width=size)
# save as a GeoPackage (GPKG) file
gdf_save = gdf.applymap(lambda x: str(x) if isinstance(x, list) else x)
gdf_save.drop(labels="nodes", axis=1).to_file("./data/piedmont_bldgs.gpkg", driver="GPKG")
###Output
_____no_output_____
###Markdown
Now let's analyze the size of the building footprints...
###Code
# calculate the area in projected units (meters) of each building footprint, then display first five
areas = gdf_proj.area
areas.head()
# total area (sq m) covered by building footprints
sum(areas)
# get the total area within Piedmont's admin boundary in sq meters
place = ox.geocode_to_gdf("Piedmont, California, USA")
place_proj = ox.project_gdf(place)
place_proj.area.iloc[0]
# what proportion of piedmont is covered by building footprints?
sum(areas) / place_proj.area.iloc[0]
###Output
_____no_output_____
###Markdown
One and a half square kilometers near the Arc de Triomphe
###Code
point = (48.873446, 2.294255)
dist = 612
gdf = ox.geometries_from_point(point, tags, dist=dist)
gdf_proj = ox.project_gdf(gdf)
bbox = ox.utils_geo.bbox_from_point(point=point, dist=dist, project_utm=True)
fp = f"./{img_folder}/paris_bldgs.{extension}"
fig, ax = ox.plot_footprints(
gdf_proj,
bbox=bbox,
color="w",
filepath=fp,
dpi=90,
save=True,
show=False,
close=True,
)
Image(fp, height=size, width=size)
###Output
_____no_output_____
###Markdown
Street network + building footprints: square-mile visualizations. Plot and save to disk as .png
###Code
# helper function to get one-square-mile street networks and building footprints, and plot them
def make_plot(
place,
point,
network_type="drive",
dpi=40,
dist=805,
default_width=4,
street_widths=None,
):
fp = f"./{img_folder}/{place}.{extension}"
gdf = ox.geometries_from_point(point, tags, dist=dist)
fig, ax = ox.plot_figure_ground(
point=point,
dist=dist,
network_type=network_type,
default_width=default_width,
street_widths=street_widths,
save=False,
show=False,
close=True,
)
fig, ax = ox.plot_footprints(
gdf, ax=ax, filepath=fp, dpi=dpi, save=True, show=False, close=True
)
place = "portland_buildings"
point = (45.517309, -122.682138)
make_plot(place, point)
place = "richmond_district_buildings"
point = (37.781999, -122.472501)
make_plot(place, point)
place = "port_au_prince_buildings"
point = (18.522240, -72.347607)
make_plot(place, point, network_type="all", default_width=1, street_widths={"secondary": 3})
place = "monrovia_liberia_buildings"
point = (6.340236, -10.747255)
make_plot(place, point, network_type="all", default_width=2, street_widths={"primary": 6})
###Output
_____no_output_____ |
src/DocumentSimilarity/.ipynb_checkpoints/Cosine-Similarity-checkpoint.ipynb | ###Markdown
Cosine-Similarity One of the fundamental goals of this project is to find a way to quantify the similarities between the budget documents. A common approach to matching similar documents is one based on counting the maximum number of common words between the documents. However, this approach is not efficient, since the number of common words tends to increase with document size. Because we want an approach that is robust to document length, cosine similarity is selected to determine the similarity between the documents irrespective of their sizes. The steps followed to achieve our goal are:**1. Define the documents,****2. Vectorize,****3. Compute cosine similarity,****4. Visualize the results.** Importing required libraries
###Code
import os
import re
import nltk
import string
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib import style
# Change the directory for the file to be read
os.chdir(r"C:\Users\Sultan\Desktop\data\PreprocessedData")
###Output
_____no_output_____
###Markdown
1. Define the documents Convert the data to a pd dataframe
###Code
# Read the data file
df = pd.read_csv(r'CombinedData.csv', engine='python')
# Take a look at the dataframe
df.head()
# Rename col 0
df.columns = ['word','organization','year']
df.head()
# We could uncomment this and see how the files from 2020 are similar to one another
# Now let's select the data entries from columns 'word' and 'organization' that
# has the label FY2020 and ignore the rest
#df = df.loc[df['year'] == 'FY2020', ['word', 'organization', 'year']]
#df.head()
# Use loc to slice and extract data for each org.
gc_df = df.loc[df['organization'] == 'Guilford County', ['word', 'organization']]
cc_df = df.loc[df['organization'] == 'Charlotte City', ['word', 'organization']]
dcity_df = df.loc[df['organization'] == 'Durham City', ['word', 'organization']]
dcounty_df = df.loc[df['organization'] == 'Durham County', ['word', 'organization']]
mc_df = df.loc[df['organization'] == 'Mecklenburg County', ['word', 'organization']]
rc_df = df.loc[df['organization'] == 'Raleigh City', ['word', 'organization']]
wc_df = df.loc[df['organization'] == 'Wake County', ['word', 'organization']]
# Create series out of the word col
corpus = gc_df.word
corpus1 = cc_df.word
corpus2 = dcity_df.word
corpus3 = dcounty_df.word
corpus4 = mc_df.word
corpus5 = rc_df.word
corpus6 = wc_df.word
# For each text, join the elements and separate them by a single space
gc_text = ' '.join(word for word in corpus)
cc_text = ' '.join(word for word in corpus1)
dcity_text = ' '.join(word for word in corpus2)
dcounty_text = ' '.join(word for word in corpus3)
mc_text = ' '.join(word for word in corpus4)
rc_text = ' '.join(word for word in corpus5)
wc_text = ' '.join(word for word in corpus6)
###Output
_____no_output_____
###Markdown
Here the documents are defined and ready to be vectorized.
###Code
# List of documents
documents = [gc_text, cc_text, dcity_text, dcounty_text, mc_text, rc_text, wc_text]
###Output
_____no_output_____
###Markdown
2. Vectorize
###Code
# Create dtm == 'document-term matrix'
# (use a single vectorizer; re-instantiating it would drop the stop_words setting)
count_vectorizer = CountVectorizer(stop_words='english')
sparse_matrix = count_vectorizer.fit_transform(documents)
# Convert sparse matrix to a dataframe to see the word frequencies.
dtm = sparse_matrix.todense()
df = pd.DataFrame(dtm,
columns=count_vectorizer.get_feature_names(),
index=['gc_text', 'cc_text', 'dcity_text', 'dcounty_text', 'mc_text', 'rc_text', 'wc_text'])
df
###Output
_____no_output_____
###Markdown
3. Compute cosine similarity
###Code
from sklearn.metrics.pairwise import cosine_similarity
# Take the cosine similarity between the dataframe and itself
data = cosine_similarity(df, df)
type(data)
len(data)
# How similar is gc budget document to the others
data[:1]
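# Sanity check (sketch): cosine similarity between two documents is the dot product of
# their count vectors divided by the product of their norms, so the Guilford County vs.
# Charlotte City entry can be reproduced by hand from the document-term matrix.
v0, v1 = df.iloc[0].values, df.iloc[1].values
np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1)), data[0, 1]  # the two values should agree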
###Output
_____no_output_____
###Markdown
4. Visualize
###Code
# Plot the cosine similarity
fig, ax = plt.subplots(figsize=(12,8))
plt.plot(data)
plt.show()
import seaborn as sns
# Plot the cosine similarity matrix as a heatmap
fig, ax = plt.subplots(figsize=(12,8))
ax = sns.heatmap(data, linewidth = 0.5)
plt.show()
###Output
_____no_output_____ |
notebooks_workflow_complete/2.0_setup_sensitivity_analysis.ipynb | ###Markdown
We want to set up for global sensitivity analysis. NOTE: Make sure `run_ensemble` is set appropriately - if `run_ensemble` is set to `True`, local runs are performed; if `run_ensemble` is set to `False`, results from the journal article are used.
###Code
run_ensemble=False
pst_root = 'prior_mc_wide'
if run_ensemble==True:
pst = pyemu.Pst(f'../noptmax0_testing/{pst_root}.pst')
else:
pst = pyemu.Pst(f'../output/noptmax0/{pst_root}.pst')
output_dir = '../run_data'
###Output
_____no_output_____
###Markdown
Set `tie_by_group` to `True`. Also update the lower bound for CHD parameters to nearly 1.0 (because many of these values are at or near the bottom of model cells, and if sampling sets them below the bottom of the cell, MODFLOW6 will not run). Also unfix the CHD parameters so they will be evaluated. All other defaults for `pestpp-sen` will be accepted.
###Code
pst.pestpp_options['tie_by_group'] = True
pst.parameter_data.loc[pst.parameter_data.pargp=='chd', 'parlbnd'] = 0.999999
pst.parameter_data.partrans = 'log'
pst.parameter_data.partrans.unique()
pst.write(f'../run_data/{pst_root}_sens.pst')
###Output
noptmax:0, npar_adj:5087, nnz_obs:525
###Markdown
If `run_ensemble=True`, the cell below will run a local `prior_mc_wide_sens` global sensitivity analysis* **NOTE: must have the `pestpp-sen` executable in the system path or in `../run_data/`*** same process as in notebooks 1.0 and 1.3 for a parallel run* for this `pestpp-sen` run, the total number of model runs is 64, which is the number of parameter groups * 4* will run in parallel locally using the number of cores specified below by `num_workers`* creates a new directory called `"../master_sen/"` which is a copy of run_data* while running, generates worker directories that are removed when the run is complete* results are moved to `"../run_data/"`
###Code
if run_ensemble==True:
# set some variables for starting a group of PEST++ workers on the local machine
# MAKE SURE THAT PESTPP-IES and MF6 executables are in your system path or are in '../run_data'
num_workers = 5 # number of local workers -- VERY IMPORTANT, DO NOT MAKE TOO BIG
if sys.platform == 'win32':
pst_exe = 'pestpp-sen.exe'
else:
pst_exe = 'pestpp-sen'
template_ws = '../run_data' # template_directory
m_d = '../master_sen'
pyemu.os_utils.start_workers(worker_dir=template_ws,
exe_rel_path=pst_exe,
pst_rel_path=f'{pst_root}_sens.pst',
num_workers=num_workers,
master_dir=m_d
)
if run_ensemble==True:
# move results into run_data and clean up
move_result_files = glob.glob(os.path.join(m_d, f'{pst_root}_sens*'))
move_result_files = [f for f in move_result_files if 'pst' not in f]
[shutil.copy(os.path.join(m_d, file), output_dir) for file in move_result_files]
# Remove master dir.
shutil.rmtree(m_d)
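# Quick check (sketch): list whichever pestpp-sen result files are now in run_data; the
# exact output file names depend on the pestpp-sen version and settings, so only the
# common '{pst_root}_sens' prefix is assumed here.
sorted(glob.glob(os.path.join(output_dir, f'{pst_root}_sens*')))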
###Output
_____no_output_____ |
Exercises/ch03.ipynb | ###Markdown
23 > Are you able to write a regular expression to tokenize text in such a way that the word _don’t_ is tokenized into _do_ and _n’t_? Explain why this regular expression won’t work: `«n't|\w+»`.
###Code
import nltk
nltk.regexp_tokenize('don\'t', r'((?:\w+(?=n\'t))|n\'t)')
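# Why «n't|\w+» fails (sketch): \w does not match the apostrophe, so the alternation never
# gets to match "n't" as a unit -- \w+ greedily consumes "don", the apostrophe is skipped,
# and only "t" remains, splitting "don't" into "don" and "t" instead of "do" and "n't".
nltk.regexp_tokenize('don\'t', r'n\'t|\w+')  # -> ['don', 't']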
###Output
_____no_output_____
###Markdown
25 > _Pig Latin_ is a simple transformation of English text. Each word of the text is converted as follows: move any consonant (or consonant cluster) that appears at the start of the word to the end, then append _ay_, e.g., _string_ → _ingstray_, _idle_ → _idleay_ (see http://en.wikipedia.org/wiki/Pig_Latin).>> a. Write a function to convert a word to Pig Latin.>> b. Write code that converts text, instead of individual words.>> c. Extend it further to preserve capitalization, to keep `qu` together (so that `quiet` becomes `ietquay`, for example), and to detect when `y` is used as a consonant (e.g., `yellow`) versus a vowel (e.g., `style`).
###Code
def to_pig_latin(word):
import re
idx = re.search('[aeiou]', word)
if idx is not None:
ret = word[idx.span()[0]:] + word[0:idx.span()[0]] + 'ay'
else:
ret = word
return ret
to_pig_latin('string')
def to_pig_latin_text(text):
return [to_pig_latin(word) for word in nltk.word_tokenize(text)]
to_pig_latin_text('The Project Gutenberg EBook of Crime and Punishment, by Fyodor Dostoevsky')
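# Part (c) sketch: preserve capitalization, keep a leading 'qu' together, and treat 'y' as a
# consonant only at the start of a word; these rules are one reasonable reading of the
# exercise, not the book's reference answer.
def to_pig_latin_v2(word):
    was_capitalized = word[:1].isupper()
    lower = word.lower()
    idx = 0
    while idx < len(lower):
        ch = lower[idx]
        if ch in 'aeiou' or (ch == 'y' and idx > 0):
            break  # first vowel sound found
        idx += 2 if ch == 'q' and lower[idx + 1:idx + 2] == 'u' else 1
    result = lower[idx:] + lower[:idx] + 'ay'
    return result.capitalize() if was_capitalized else result

[to_pig_latin_v2(w) for w in ['quiet', 'yellow', 'style', 'String']]  # -> ['ietquay', 'ellowyay', 'ylestay', 'Ingstray']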
###Output
_____no_output_____
###Markdown
29 > Readability measures are used to score the reading difficulty of a text, for the purposes of selecting texts of appropriate difficulty for language learners. Let us define $\mu_w$ to be the average number of letters per word, and $\mu_s$ to be the average number of words per sentence, in a given text. The Automated Readability Index (ARI) of the text is defined to be: 4.71 $\mu_w$ + 0.5 $\mu_s$ - 21.43. Compute the ARI score for various sections of the Brown Corpus, including section `f` (popular lore) and `j` (learned). Make use of the fact that `nltk.corpus.brown.words()` produces a sequence of words, whereas `nltk.corpus.brown.sents()` produces a sequence of sentences.
###Code
def cal_ARI():
words = nltk.corpus.brown.words(categories=['lore', 'learned'])
sents = nltk.corpus.brown.sents(categories=['lore', 'learned'])
w = sum([len(word) for word in words]) / len(words)
s = len(words) / len(sents)
return 4.71 * w + 0.5 * s - 21.43
cal_ARI()
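# Per-category variant (sketch): the exercise asks for the ARI of individual Brown sections,
# e.g. 'lore' (popular lore, section f) and 'learned' (section j), rather than both combined.
def cal_ARI_for(category):
    words = nltk.corpus.brown.words(categories=category)
    sents = nltk.corpus.brown.sents(categories=category)
    mu_w = sum(len(word) for word in words) / len(words)
    mu_s = len(words) / len(sents)
    return 4.71 * mu_w + 0.5 * mu_s - 21.43

{category: cal_ARI_for(category) for category in ['lore', 'learned']}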
###Output
_____no_output_____
###Markdown
38 > An interesting challenge for tokenization is words that have been split across a linebreak. E.g., if _long-term_ is split, then we have the string `long-\nterm`.>> a. Write a regular expression that identifies words that are hyphenated at a linebreak. The expression will need to include the `\n` character.>> b. Use `re.sub()` to remove the `\n` character from these words.>> c. How might you identify words that should not remain hyphenated once the newline is removed, e.g., `'encyclo-\npedia'`?
###Code
import re
re.search(r'[-\w]+\n[-\w]+', 'long-\nterm')
re.sub(r'([-\w]+)\n([-\w]+)', r'\1\2', 'long-\nterm')
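# Part (c) sketch: drop the hyphen only when the joined form is itself a dictionary word;
# the nltk 'words' wordlist lookup is the assumption here, not part of the original answer.
english_vocab = set(w.lower() for w in nltk.corpus.words.words())

def dehyphenate(text):
    def fix(match):
        joined = match.group(1) + match.group(2)
        return joined if joined.lower() in english_vocab else match.group(1) + '-' + match.group(2)
    return re.sub(r'(\w+)-\n(\w+)', fix, text)

dehyphenate('encyclo-\npedia'), dehyphenate('long-\nterm')  # expected ('encyclopedia', 'long-term'), assuming the wordlist contains 'encyclopedia'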
words = ['attribution', 'confabulation', 'elocution',
'sequoia', 'tenacious', 'unidirectional']
vsequences = set([''.join(re.findall(r'[aeiou]', word)) for word in words])
sorted(vsequences)
re.findall(r'[aeiou]', 'attribution')
###Output
_____no_output_____ |
mslearn-aml-labs/.ipynb_checkpoints/12-Monitoring_a_Model-checkpoint.ipynb | ###Markdown
Monitoring a Model When you've deployed a model into production as a service, you'll want to monitor it to track usage and explore the requests it processes. You can use Azure Application Insights to monitor activity for a model service endpoint. Before You Start Before you start this lab, ensure that you have completed the *Create an Azure Machine Learning Workspace* and *Create a Compute Instance* tasks in [Lab 1: Getting Started with Azure Machine Learning](./labdocs/Lab01.md). Then open this notebook in Jupyter on your Compute Instance. Connect to Your Workspace The first thing you need to do is to connect to your workspace using the Azure ML SDK. > **Note**: You may be prompted to authenticate. Just copy the code and click the link provided to sign into your Azure subscription, and then return to this notebook.
###Code
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to work with', ws.name)
###Output
Ready to work with nikhilsuthardp100
###Markdown
Prepare a Model for Deployment Now we need a model to deploy. Run the code below to: 1. Create and register a dataset. 2. Train a model using the dataset. 3. Register the model.
###Code
from azureml.core import Experiment
from azureml.core import Model
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from azureml.core import Dataset
# Upload data files to the default datastore
default_ds = ws.get_default_datastore()
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'],
target_path='diabetes-data/',
overwrite=True,
show_progress=True)
#Create a tabular dataset from the path on the datastore
print('Creating dataset...')
data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Register the tabular dataset
print('Registering dataset...')
try:
data_set = data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
except Exception as ex:
print(ex)
# Create an Azure ML experiment in your workspace
experiment = Experiment(workspace = ws, name = "diabetes-training")
run = experiment.start_logging()
print("Starting experiment:", experiment.name)
# load the diabetes dataset
print("Loading Data...")
diabetes = data_set.to_pandas_dataframe()
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a decision tree model
print('Training a decision tree model')
model = DecisionTreeClassifier().fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
# Save the trained model
model_file = 'diabetes_model.pkl'
joblib.dump(value=model, filename=model_file)
run.upload_file(name = 'outputs/' + model_file, path_or_stream = './' + model_file)
# Complete the run
run.complete()
# Register the model
print('Registering model...')
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'Inline Training'},
properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
# Get the registered model
model = ws.models['diabetes_model']
print('Model trained and registered.')
###Output
Uploading an estimated of 2 files
Uploading ./data/diabetes.csv
Uploaded ./data/diabetes.csv, 1 files out of an estimated total of 2
Uploading ./data/diabetes2.csv
Uploaded ./data/diabetes2.csv, 2 files out of an estimated total of 2
Uploaded 2 files
Creating dataset...
Registering dataset...
###Markdown
Deploy a Model as a Web Service Now you're ready to deploy the registered model as a web service. First, create a folder for the deployment configuration files.
###Code
import os
folder_name = 'diabetes_service'
# Create a folder for the web service files
experiment_folder = './' + folder_name
os.makedirs(experiment_folder, exist_ok=True)
print(folder_name, 'folder created.')
# Set path for scoring script
script_file = os.path.join(experiment_folder,"score_diabetes.py")
###Output
_____no_output_____
###Markdown
Now you need an entry script that the service will use to score new data.
###Code
%%writefile $script_file
import json
import joblib
import numpy as np
from azureml.core.model import Model
# Called when the service is loaded
def init():
global model
# Get the path to the deployed model file and load it
model_path = Model.get_model_path('diabetes_model')
model = joblib.load(model_path)
# Called when a request is received
def run(raw_data):
# Get the input data as a numpy array
data = json.loads(raw_data)['data']
np_data = np.array(data)
# Get a prediction from the model
predictions = model.predict(np_data)
# print the data and predictions (so they'll be logged!)
log_text = 'Data:' + str(data) + ' - Predictions:' + str(predictions)
print(log_text)
# Get the corresponding classname for each prediction (0 or 1)
classnames = ['not-diabetic', 'diabetic']
predicted_classes = []
for prediction in predictions:
predicted_classes.append(classnames[prediction])
# Return the predictions as JSON
return json.dumps(predicted_classes)
###Output
_____no_output_____
###Markdown
You'll also need a Conda configuration file for the service environment.
###Code
from azureml.core.conda_dependencies import CondaDependencies
# Add the dependencies for our model (AzureML defaults is already included)
myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")
# Save the environment config as a .yml file
env_file = folder_name + "/diabetes_env.yml"
with open(env_file,"w") as f:
f.write(myenv.serialize_to_string())
print("Saved dependency info in", env_file)
# Print the .yml file
with open(env_file,"r") as f:
print(f.read())
###Output
_____no_output_____
###Markdown
Now you can deploy the service (in this case, as an Azure Container Instance (ACI)). > **Note**: This can take a few minutes - wait until the state is shown as **Healthy**.
###Code
from azureml.core.webservice import AciWebservice, Webservice
from azureml.core.model import Model
from azureml.core.model import InferenceConfig
# Configure the scoring environment
inference_config = InferenceConfig(runtime= "python",
entry_script=script_file,
conda_file=env_file)
service_name = "diabetes-service-app-insights"
deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1)
aci_service = Model.deploy(workspace=ws,
name= service_name,
models= [model],
inference_config= inference_config,
deployment_config=deployment_config)
aci_service.wait_for_deployment(show_output = True)
print(aci_service.state)
###Output
_____no_output_____
###Markdown
Enable Application Insights Next, you need to enable Application Insights for the service.
###Code
# Enable AppInsights
aci_service.update(enable_app_insights=True)
print('AppInsights enabled!')
###Output
_____no_output_____
###Markdown
Use the Web Service With the service deployed, you can now consume it from a client application. First, determine the URL to which these applications must submit their requests.
###Code
endpoint = aci_service.scoring_uri
print(endpoint)
###Output
_____no_output_____
###Markdown
Now that you know the endpoint URI, an application can simply make an HTTP request, sending the patient data in JSON (or binary) format, and receive back the predicted class(es). > **Tip**: If an error occurs because the service endpoint isn't ready, wait a few seconds and try again!
###Code
import requests
import json
# Create new data for inferencing
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
[0,148,58,11,179,39.19207553,0.160829008,45]]
# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})
# Set the content type
headers = { 'Content-Type':'application/json' }
# Get the predictions
predictions = requests.post(endpoint, input_json, headers = headers)
print(predictions.status_code)
if predictions.status_code == 200:
predicted_classes = json.loads(predictions.json())
for i in range(len(x_new)):
print ("Patient {}".format(x_new[i]), predicted_classes[i] )
###Output
_____no_output_____
###Markdown
Now you can view the data logged for the service endpoint: 1. In the [Azure portal](https://portal.azure.com), open your Machine Learning workspace. 2. On the **Overview** page, click the link for the associated **Application Insights** resource. 3. On the Application Insights blade, click **Logs**. > **Note**: If this is the first time you've opened log analytics, you may need to click **Get Started** to open the query editor. If a tip explaining how to write a query is displayed, close it. 4. Paste the following query into the query editor and click **Run** ``` traces |where message == "STDOUT" and customDimensions.["Service Name"] == "diabetes-service-app-insights" |project timestamp, customDimensions.Content ``` 5. View the results. At first there may be none, because an ACI web service can take two to three minutes to send the telemetry to Application Insights. Wait a few minutes and re-run the query until you see the logged data and predictions. Delete the Service When you no longer need your service, you should delete it to avoid incurring unnecessary charges.
###Code
aci_service.delete()
print('Service deleted.')
###Output
_____no_output_____ |
1.DeepLearning/00.Artifical_Neuron/single_neuron_2.ipynb | ###Markdown
Single Neuron - Multi-Input Gate Neuron
###Code
import numpy as np
import random
import math
class GateNeuron:
def __init__(self):
self.w = np.array([0.0, 0.0]) # weight of one input
self.b = np.array([0.0]) # bias
print("Initial w: {0}, b: {1}".format(self.w, self.b))
def u(self, x):
return np.dot(self.w, x) + self.b
def f(self, u):
return max(0.0, u)
def z(self, x):
u = self.u(x)
return self.f(u)
def squared_error(self, x, z_target):
return 1.0 / 2.0 * math.pow(self.z(x) - z_target, 2)
def numerical_f_derivative(self, u):
delta = 1e-4 # 0.0001
return (self.f(u + delta) - self.f(u - delta)) / (2 * delta)
def d_E_over_d_w(self, input, z_target):
u = self.u(input)
z = self.f(u)
error = z - z_target
return error * self.numerical_f_derivative(u) * input
def d_E_over_d_b(self, input, z_target):
u = self.u(input)
z = self.f(u)
error = z - z_target
return error * self.numerical_f_derivative(u)
def learning(self, alpha, maxEpoch, data):
for i in range(maxEpoch):
for idx in range(data.numTrainData):
x = data.training_input_value[idx]
z_target = data.training_z_target[idx]
                # use the current training example x (not a stale `input` variable leaked from the calling scope)
                self.w = self.w - alpha * self.d_E_over_d_w(x, z_target)
                self.b = self.b - alpha * self.d_E_over_d_b(x, z_target)
sum = 0.0
for idx in range(data.numTrainData):
sum = sum + self.squared_error(data.training_input_value[idx], data.training_z_target[idx])
print("Epoch {0}: Error: {1}, w: {2}, b: {3}".format(i, sum / data.numTrainData, self.w, self.b))
###Output
_____no_output_____
###Markdown
1. And Gate
###Code
class Data:
def __init__(self):
self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
self.training_z_target = np.array([0.0, 0.0, 0.0, 1.0])
self.numTrainData = len(self.training_input_value)
if __name__ == '__main__':
n = GateNeuron()
d = Data()
for idx in range(d.numTrainData):
input = d.training_input_value[idx]
z = n.z(input)
z_target = d.training_z_target[idx]
error = n.squared_error(input, z_target)
print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
n.learning(0.1, 100, d)
for idx in range(d.numTrainData):
input = d.training_input_value[idx]
z = n.z(input)
z_target = d.training_z_target[idx]
error = n.squared_error(input, z_target)
print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
###Output
Initial w: [ 0. 0.], b: [ 0.]
x: [ 0. 0.], z: 0.0, z_target: 0.0, error: 0.0
x: [ 1. 0.], z: 0.0, z_target: 0.0, error: 0.0
x: [ 0. 1.], z: 0.0, z_target: 0.0, error: 0.0
x: [ 1. 1.], z: 0.0, z_target: 1.0, error: 0.5
Epoch 0: Error: 0.08792499999999971, w: [ 0.05 0.05], b: [ 0.09]
Epoch 1: Error: 0.07136089779860384, w: [ 0.10037869 0.10037869], b: [ 0.13030295]
Epoch 2: Error: 0.06852770435326894, w: [ 0.11391738 0.11391738], b: [ 0.14113391]
Epoch 3: Error: 0.06788318451022468, w: [ 0.11755575 0.11755575], b: [ 0.1440446]
Epoch 4: Error: 0.06771841746594348, w: [ 0.11853352 0.11853352], b: [ 0.14482682]
Epoch 5: Error: 0.06767474776897946, w: [ 0.11879629 0.11879629], b: [ 0.14503703]
Epoch 6: Error: 0.06766305606026743, w: [ 0.1188669 0.1188669], b: [ 0.14509352]
Epoch 7: Error: 0.06765991722664656, w: [ 0.11888588 0.11888588], b: [ 0.1451087]
Epoch 8: Error: 0.06765907393063264, w: [ 0.11889098 0.11889098], b: [ 0.14511278]
Epoch 9: Error: 0.06765884732105898, w: [ 0.11889235 0.11889235], b: [ 0.14511388]
Epoch 10: Error: 0.06765878642352607, w: [ 0.11889272 0.11889272], b: [ 0.14511418]
Epoch 11: Error: 0.06765877005809727, w: [ 0.11889282 0.11889282], b: [ 0.14511425]
Epoch 12: Error: 0.06765876566008176, w: [ 0.11889284 0.11889284], b: [ 0.14511428]
Epoch 13: Error: 0.06765876447816584, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 14: Error: 0.06765876416053951, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 15: Error: 0.06765876407518104, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 16: Error: 0.06765876405224194, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 17: Error: 0.06765876404607732, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 18: Error: 0.06765876404442066, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 19: Error: 0.06765876404397544, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 20: Error: 0.06765876404385582, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 21: Error: 0.06765876404382365, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 22: Error: 0.06765876404381502, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 23: Error: 0.06765876404381269, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 24: Error: 0.06765876404381205, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 25: Error: 0.0676587640438119, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 26: Error: 0.06765876404381185, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 27: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 28: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 29: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 30: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 31: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 32: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 33: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 34: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 35: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 36: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 37: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 38: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 39: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 40: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 41: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 42: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 43: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 44: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 45: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 46: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 47: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 48: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 49: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 50: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 51: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 52: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 53: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 54: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 55: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 56: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 57: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 58: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 59: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 60: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 61: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 62: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 63: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 64: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 65: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 66: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 67: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 68: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 69: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 70: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 71: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 72: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 73: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 74: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 75: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 76: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 77: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 78: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 79: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 80: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 81: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 82: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 83: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 84: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 85: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 86: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 87: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 88: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 89: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 90: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 91: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 92: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 93: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 94: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 95: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 96: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 97: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 98: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
Epoch 99: Error: 0.06765876404381183, w: [ 0.11889285 0.11889285], b: [ 0.14511428]
x: [ 0. 0.], z: [ 0.14511428], z_target: 0.0, error: 0.010529077654371036
x: [ 1. 0.], z: [ 0.26400714], z_target: 0.0, error: 0.034849884492072145
x: [ 0. 1.], z: [ 0.26400714], z_target: 0.0, error: 0.034849884492072145
x: [ 1. 1.], z: [ 0.38289999], z_target: 1.0, error: 0.19040620953673199
###Markdown
2. Or Gate
###Code
class Data:
def __init__(self):
self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
self.training_z_target = np.array([0.0, 1.0, 1.0, 1.0])
self.numTrainData = len(self.training_input_value)
if __name__ == '__main__':
n = GateNeuron()
d = Data()
for idx in range(d.numTrainData):
input = d.training_input_value[idx]
z = n.z(input)
z_target = d.training_z_target[idx]
error = n.squared_error(input, z_target)
print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
n.learning(0.1, 100, d)
for idx in range(d.numTrainData):
input = d.training_input_value[idx]
z = n.z(input)
z_target = d.training_z_target[idx]
error = n.squared_error(input, z_target)
print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
###Output
Initial w: [ 0. 0.], b: [ 0.]
x: [ 0. 0.], z: 0.0, z_target: 0.0, error: 0.0
x: [ 1. 0.], z: 0.0, z_target: 1.0, error: 0.5
x: [ 0. 1.], z: 0.0, z_target: 1.0, error: 0.5
x: [ 1. 1.], z: 0.0, z_target: 1.0, error: 0.5
Epoch 0: Error: 0.11990145668800876, w: [ 0.18932 0.18932], b: [ 0.201456]
Epoch 1: Error: 0.07167508547047359, w: [ 0.26165934 0.26165934], b: [ 0.25932748]
Epoch 2: Error: 0.0620513132581235, w: [ 0.28109972 0.28109972], b: [ 0.27487977]
Epoch 3: Error: 0.0597059989457013, w: [ 0.28632409 0.28632409], b: [ 0.27905927]
Epoch 4: Error: 0.059093125101371886, w: [ 0.28772808 0.28772808], b: [ 0.28018247]
Epoch 5: Error: 0.05892967908610881, w: [ 0.28810539 0.28810539], b: [ 0.28048431]
Epoch 6: Error: 0.05888584560732623, w: [ 0.28820679 0.28820679], b: [ 0.28056543]
Epoch 7: Error: 0.05887407241666068, w: [ 0.28823404 0.28823404], b: [ 0.28058723]
Epoch 8: Error: 0.0588709089797813, w: [ 0.28824136 0.28824136], b: [ 0.28059309]
Epoch 9: Error: 0.058870058876500715, w: [ 0.28824333 0.28824333], b: [ 0.28059466]
Epoch 10: Error: 0.05886983042343856, w: [ 0.28824386 0.28824386], b: [ 0.28059509]
Epoch 11: Error: 0.05886976902946993, w: [ 0.288244 0.288244], b: [ 0.2805952]
Epoch 12: Error: 0.05886975253055608, w: [ 0.28824404 0.28824404], b: [ 0.28059523]
Epoch 13: Error: 0.05886974809666264, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 14: Error: 0.05886974690510459, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 15: Error: 0.05886974658488697, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 16: Error: 0.05886974649883219, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 17: Error: 0.058869746475705906, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 18: Error: 0.05886974646949103, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 19: Error: 0.05886974646782081, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 20: Error: 0.05886974646737197, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 21: Error: 0.058869746467251356, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 22: Error: 0.05886974646721893, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 23: Error: 0.05886974646721025, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 24: Error: 0.058869746467207905, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 25: Error: 0.058869746467207267, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 26: Error: 0.05886974646720711, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 27: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 28: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 29: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 30: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 31: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 32: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 33: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 34: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 35: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 36: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 37: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 38: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 39: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 40: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 41: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 42: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 43: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 44: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 45: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 46: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 47: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 48: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 49: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 50: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 51: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 52: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 53: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 54: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 55: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 56: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 57: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 58: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 59: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 60: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 61: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 62: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 63: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 64: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 65: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 66: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 67: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 68: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 69: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 70: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 71: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 72: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 73: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 74: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 75: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 76: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 77: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 78: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 79: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 80: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 81: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 82: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 83: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 84: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 85: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 86: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 87: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 88: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 89: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 90: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 91: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 92: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 93: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 94: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 95: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 96: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 97: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 98: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
Epoch 99: Error: 0.05886974646720705, w: [ 0.28824405 0.28824405], b: [ 0.28059524]
x: [ 0. 0.], z: [ 0.28059524], z_target: 0.0, error: 0.03936684463945226
x: [ 1. 0.], z: [ 0.56883929], z_target: 1.0, error: 0.0929497779415466
x: [ 0. 1.], z: [ 0.56883929], z_target: 1.0, error: 0.0929497779415466
x: [ 1. 1.], z: [ 0.85708334], z_target: 1.0, error: 0.01021258534628274
###Markdown
3. XOR Gate
###Code
class Data:
def __init__(self):
self.training_input_value = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
self.training_z_target = np.array([0.0, 1.0, 1.0, 0.0])
self.numTrainData = len(self.training_input_value)
if __name__ == '__main__':
n = GateNeuron()
d = Data()
for idx in range(d.numTrainData):
input = d.training_input_value[idx]
z = n.z(input)
z_target = d.training_z_target[idx]
error = n.squared_error(input, z_target)
print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
n.learning(0.1, 100, d)
for idx in range(d.numTrainData):
input = d.training_input_value[idx]
z = n.z(input)
z_target = d.training_z_target[idx]
error = n.squared_error(input, z_target)
print("x: {0}, z: {1}, z_target: {2}, error: {3}".format(input, n.z(input), z_target, error))
###Output
Initial w: [ 0. 0.], b: [ 0.]
x: [ 0. 0.], z: 0.0, z_target: 0.0, error: 0.0
x: [ 1. 0.], z: 0.0, z_target: 1.0, error: 0.5
x: [ 0. 1.], z: 0.0, z_target: 1.0, error: 0.5
x: [ 1. 1.], z: 0.0, z_target: 0.0, error: 0.0
Epoch 0: Error: 0.1688197766880002, w: [ 0.08932 0.08932], b: [ 0.121456]
Epoch 1: Error: 0.15104628360517683, w: [ 0.13478549 0.13478549], b: [ 0.15782839]
Epoch 2: Error: 0.14758783650673266, w: [ 0.14700382 0.14700382], b: [ 0.16760305]
Epoch 3: Error: 0.1467536031738944, w: [ 0.15028735 0.15028735], b: [ 0.17022988]
Epoch 4: Error: 0.1465362867920789, w: [ 0.15116977 0.15116977], b: [ 0.17093581]
Epoch 5: Error: 0.1464783819641094, w: [ 0.15140691 0.15140691], b: [ 0.17112552]
Epoch 6: Error: 0.14646285655882632, w: [ 0.15147063 0.15147063], b: [ 0.17117651]
Epoch 7: Error: 0.1464586868732158, w: [ 0.15148776 0.15148776], b: [ 0.17119021]
Epoch 8: Error: 0.1464575665049202, w: [ 0.15149236 0.15149236], b: [ 0.17119389]
Epoch 9: Error: 0.1464572654322638, w: [ 0.1514936 0.1514936], b: [ 0.17119488]
Epoch 10: Error: 0.14645718452340706, w: [ 0.15149393 0.15149393], b: [ 0.17119515]
Epoch 11: Error: 0.14645716278014786, w: [ 0.15149402 0.15149402], b: [ 0.17119522]
Epoch 12: Error: 0.14645715693690078, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 13: Error: 0.14645715536659537, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 14: Error: 0.14645715494459377, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 15: Error: 0.14645715483118565, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 16: Error: 0.14645715480070853, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 17: Error: 0.14645715479251814, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 18: Error: 0.1464571547903171, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 19: Error: 0.14645715478972557, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 20: Error: 0.14645715478956658, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 21: Error: 0.14645715478952387, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 22: Error: 0.1464571547895124, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 23: Error: 0.14645715478950933, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 24: Error: 0.1464571547895085, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 25: Error: 0.1464571547895083, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 26: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 27: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 28: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 29: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 30: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 31: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 32: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 33: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 34: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 35: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 36: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 37: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 38: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 39: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 40: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 41: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 42: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 43: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 44: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 45: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 46: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 47: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 48: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 49: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 50: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 51: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 52: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 53: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 54: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 55: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 56: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 57: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 58: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 59: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 60: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 61: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 62: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 63: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 64: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 65: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 66: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 67: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 68: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 69: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 70: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 71: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 72: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 73: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 74: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 75: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 76: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 77: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 78: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 79: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 80: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 81: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 82: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 83: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 84: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 85: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 86: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 87: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 88: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 89: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 90: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 91: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 92: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 93: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 94: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 95: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 96: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 97: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 98: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
Epoch 99: Error: 0.1464571547895082, w: [ 0.15149405 0.15149405], b: [ 0.17119524]
x: [ 0. 0.], z: [ 0.17119524], z_target: 0.0, error: 0.014653905632269279
x: [ 1. 0.], z: [ 0.3226893], z_target: 1.0, error: 0.2293748941962165
x: [ 0. 1.], z: [ 0.3226893], z_target: 1.0, error: 0.2293748941962165
x: [ 1. 1.], z: [ 0.47418335], z_target: 0.0, error: 0.11242492513333044
|
word_cloud/Mining_Twitter-Copy1.ipynb | ###Markdown
Then, from your terminal, execute this script with its output piped to a text file: `python your_script.py > tweets_data.txt`. Then run the script below to create a pandas dataframe of the tweets data.
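The streaming script referred to above is not included in this notebook; a minimal sketch of what it might look like, assuming the pre-4.0 `tweepy` API (the class names, track keywords, and credentials below are placeholders and assumptions, not the original script):

```python
# your_script.py -- stream matching tweets to stdout, one JSON document per line
import tweepy

auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")   # placeholder credentials
auth.set_access_token("ACCESS_TOKEN", "ACCESS_SECRET")

class StdOutListener(tweepy.StreamListener):
    def on_data(self, data):
        print(data)          # each line is one tweet's JSON payload
        return True
    def on_error(self, status):
        return False         # stop streaming on errors such as rate limiting

stream = tweepy.Stream(auth, StdOutListener())
stream.filter(track=["hillary", "clinton"])
```

Run it as `python your_script.py > tweets_data.txt`; the analysis below reads a file captured this way (e.g. `clinton.txt`).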
###Code
%matplotlib inline
import json
import pandas as pd
import matplotlib.pyplot as plt
from os import path
pd.set_option("display.max_rows",1000)
pd.set_option("display.max_columns",20)
pd.set_option("display.max_colwidth",150)
d = path.dirname('/home/pybokeh/temp/')
#text = open(path.join(d, 'twitter_data.txt')).read()
tweets_data = []
tweets_file = open(path.join(d, 'clinton.txt'),'r')
for line in tweets_file:
try:
tweet = json.loads(line)
if len(tweet) > 10:
tweets_data.append(tweet)
except:
continue
print(len(tweets_data))
tweets = pd.DataFrame()
tweets['text'] = [tweet['text'] for tweet in tweets_data]
tweets['lang'] = [tweet['lang'] for tweet in tweets_data]
tweets['sample'] = [tweet['text'][:20] for tweet in tweets_data]
tweets['retweeted'] = [tweet['retweeted'] for tweet in tweets_data]
english_tweets = tweets[(tweets['lang']=='en') & (tweets['retweeted']==False)]
english_tweets = english_tweets.drop_duplicates(subset='text');
criteria1 = english_tweets['text'].str.contains("Hillary")
criteria2 = english_tweets['text'].str.contains("hillary")
criteria3 = english_tweets['text'].str.contains("hilary")
english_tweets = english_tweets[criteria1 | criteria2 | criteria3]
english_tweets
text = ''
for line in english_tweets['text']:
text = text + ' ' + line.replace("'s", " ")
%matplotlib inline
from os import path
from scipy.misc import imread
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
d = path.dirname('/home/pybokeh/Downloads/')
# Read the whole text.
#text = strWords
#text = open(path.join(d, 'alice.txt')).read()
additional_words = [
'rt',
'ebay',
't',
'https',
'co',
't',
'amp',
's',
'tcot',
'de'
]
for word in additional_words:
STOPWORDS.add(word)
# read the mask image
# taken from
# http://www.stencilry.org/stencils/movies/alice%20in%20wonderland/255fk.jpg
#honda_mask = imread(path.join(d, "honda_logo_mask.png"), flatten=True)
#wc = WordCloud(background_color="black", max_words=2000, mask=honda_mask, stopwords=STOPWORDS)
wc = WordCloud(width=800, height=600).generate(text)
# store to file
wc.to_file(path.join(d, "hillary.png"))
# show
plt.imshow(wc)
plt.axis("off")
#plt.figure()
#plt.imshow(honda_mask, cmap=plt.cm.gray)
#plt.axis("off")
plt.show()
wc.process_text(text)[:50]
STOPWORDS
###Output
_____no_output_____ |
all-features/5.5-transplit-DWPCs.ipynb | ###Markdown
Transform and split DWPCs, assess performance
###Code
import itertools
import bz2
import pandas
import numpy
import sklearn.metrics
from scipy.special import logit
unperm_name = 'rephetio-v2.0'
feature_df = pandas.read_table('data/matrix/features.tsv.bz2')
feature_type_df = pandas.read_table('data/matrix/feature-type.tsv')
feature_df.head(2)
feature_type_df.head(2)
def transform_dwpcs(x, scaler):
x = numpy.array(x)
return numpy.arcsinh(x / scaler)
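# Why arcsinh (sketch): arcsinh(x) ~ x near zero and ~ log(2x) for large x, so dividing each
# DWPC by its unpermuted mean and applying arcsinh gives a log-like transform that still maps
# zero to zero.
numpy.arcsinh([0, 0.5, 5, 50])  # -> array([0.    , 0.4812, 2.3124, 4.6053])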
transformed_df = feature_df.copy()
dwpc_features = feature_type_df.query("feature_type == 'dwpc'").feature
degree_features = feature_type_df.query("feature_type == 'degree'").feature
feature_to_scaler = dict(zip(feature_type_df.feature, feature_type_df.unperm_mean))
for column in dwpc_features:
transformed_df[column] = transform_dwpcs(transformed_df[column], feature_to_scaler[column])
column_names = list()
columns = list()
for metapath in dwpc_features:
df = pandas.pivot_table(transformed_df, values=metapath, index=['compound_id', 'disease_id'], columns='hetnet')
df = df[df['rephetio-v2.0'].notnull()]
dwpc = df.iloc[:, 0]
pdwpc = df.iloc[:, 1:].mean(axis='columns')
rdwpc = dwpc - pdwpc
for column in dwpc, pdwpc, rdwpc:
columns.append(column)
for feature_type in 'dwpc', 'pdwpc', 'rdwpc':
column_names.append('{}_{}'.format(feature_type, metapath))
split_df = pandas.concat(columns, axis=1)
split_df.columns = column_names
split_df.reset_index(inplace=True)
split_df.head(2)
base_df = feature_df.query("hetnet == @unperm_name").copy()
base_df.insert(8, 'prior_logit', logit(base_df['prior_prob']))
for metaege in degree_features:
base_df['degree_{}'.format(metaege)] = numpy.arcsinh(base_df[metaege])
base_df.drop(
['hetnet', 'primary', 'prior_prob'] + list(degree_features) + list(dwpc_features),
axis='columns', inplace=True)
transformed_df = base_df.merge(split_df)
transformed_df.head(2)
path = 'data/matrix/rephetio-v2.0/transformed-features.tsv.bz2'
with bz2.open(path, 'wt') as write_file:
transformed_df.to_csv(write_file, sep='\t', index=False, float_format='%.5g')
###Output
_____no_output_____
###Markdown
Compute performance
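Each (transformed) DWPC column is scored by its AUROC against the indication status. As a reminder of what `sklearn.metrics.roc_auc_score` computes, here is a toy example (added sketch):
###Code
import sklearn.metrics
# Added sketch: AUROC of a toy score column against a binary target
toy_status = [0, 0, 1, 1]
toy_score = [0.1, 0.4, 0.35, 0.8]
toy_auroc = sklearn.metrics.roc_auc_score(toy_status, toy_score)  # 0.75
###Output
_____no_output_____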
###Code
rows = list()
for column in transformed_df.columns[transformed_df.columns.str.contains('dwpc')]:
feature_type, metapath = column.split('_', 1)
auroc = sklearn.metrics.roc_auc_score(transformed_df.status, transformed_df[column])
rows.append([feature_type + '_auroc', metapath, auroc])
auroc_df = pandas.DataFrame(rows, columns=['feature_type', 'metapath', 'auroc'])
auroc_df = auroc_df.pivot_table(values='auroc', index='metapath', columns='feature_type').reset_index()
auroc_df.head(2)
primary_auroc_df = pandas.read_table('data/feature-performance/primary-aurocs.tsv')
primary_auroc_df = primary_auroc_df.rename(columns={'feature': 'metapath', 'auroc_permuted': 'pdwpc_primary_auroc', 'pval_auroc': 'pval_delta_auroc'})
primary_auroc_df = primary_auroc_df[['metapath', 'nonzero', 'pdwpc_primary_auroc', 'delta_auroc', 'pval_delta_auroc']]
auroc_df = auroc_df.merge(primary_auroc_df)
auroc_df.head(2)
auroc_df.to_csv('data/feature-performance/auroc.tsv', sep='\t', index=False, float_format='%.5g')
#auroc_df.sort_values('rdwpc_auroc', ascending = False)
auroc_df[-auroc_df.metapath.str.contains('CtD')].sort_values('rdwpc_auroc', ascending = False).head()
###Output
_____no_output_____
###Markdown
Transform and split DWPCs, assess performance
###Code
import itertools
import bz2
import pandas
import numpy
import sklearn.metrics
from scipy.special import logit
unperm_name = 'wikidata-v0.1'
feature_df = pandas.read_table('data/matrix/features.tsv.bz2')
feature_type_df = pandas.read_table('data/matrix/feature-type.tsv')
feature_df.head(2)
feature_type_df.head(2)
def transform_dwpcs(x, scaler):
x = numpy.array(x)
return numpy.arcsinh(x / scaler)
transformed_df = feature_df.copy()
dwpc_features = feature_type_df.query("feature_type == 'dwpc'").feature
degree_features = feature_type_df.query("feature_type == 'degree'").feature
feature_to_scaler = dict(zip(feature_type_df.feature, feature_type_df.unperm_mean))
for column in dwpc_features:
transformed_df[column] = transform_dwpcs(transformed_df[column], feature_to_scaler[column])
column_names = list()
columns = list()
for metapath in dwpc_features:
df = pandas.pivot_table(transformed_df, values=metapath, index=['compound_id', 'disease_id'], columns='hetnet')
df = df[df[unperm_name].notnull()]
dwpc = df.iloc[:, 0]
pdwpc = df.iloc[:, 1:].mean(axis='columns')
rdwpc = dwpc - pdwpc
for column in dwpc, pdwpc, rdwpc:
columns.append(column)
for feature_type in 'dwpc', 'pdwpc', 'rdwpc':
column_names.append('{}_{}'.format(feature_type, metapath))
split_df = pandas.concat(columns, axis=1)
split_df.columns = column_names
split_df.reset_index(inplace=True)
split_df.head(2)
base_df = feature_df.query("hetnet == @unperm_name").copy()
base_df.insert(8, 'prior_logit', logit(base_df['prior_prob']))
for metaege in degree_features:
base_df['degree_{}'.format(metaege)] = numpy.arcsinh(base_df[metaege])
base_df.drop(
['hetnet', 'primary', 'prior_prob'] + list(degree_features) + list(dwpc_features),
axis='columns', inplace=True)
transformed_df = base_df.merge(split_df)
transformed_df.head(2)
path = 'data/matrix/wikidata-v0.1/transformed-features.tsv.bz2'
with bz2.open(path, 'wt') as write_file:
transformed_df.to_csv(write_file, sep='\t', index=False, float_format='%.5g')
###Output
_____no_output_____
###Markdown
Compute performance
###Code
transformed_df = transformed_df.dropna(axis=1)
transformed_df.head(2)
rows = list()
for column in transformed_df.columns[transformed_df.columns.str.contains('dwpc')]:
feature_type, metapath = column.split('_', 1)
auroc = sklearn.metrics.roc_auc_score(transformed_df.status, transformed_df[column])
rows.append([feature_type + '_auroc', metapath, auroc])
auroc_df = pandas.DataFrame(rows, columns=['feature_type', 'metapath', 'auroc'])
auroc_df = auroc_df.pivot_table(values='auroc', index='metapath', columns='feature_type').reset_index()
auroc_df.head(2)
primary_auroc_df = pandas.read_table('data/feature-performance/primary-aurocs.tsv')
primary_auroc_df = primary_auroc_df.rename(columns={'feature': 'metapath', 'auroc_permuted': 'pdwpc_primary_auroc', 'pval_auroc': 'pval_delta_auroc'})
primary_auroc_df = primary_auroc_df[['metapath', 'nonzero', 'pdwpc_primary_auroc', 'delta_auroc', 'pval_delta_auroc']]
auroc_df = auroc_df.merge(primary_auroc_df)
auroc_df.head(2)
auroc_df.to_csv('data/feature-performance/auroc.tsv', sep='\t', index=False, float_format='%.5g')
#auroc_df.sort_values('rdwpc_auroc', ascending = False)
idx = -auroc_df.metapath.str.contains('CduftD') & ~auroc_df.metapath.str.contains('DduftC')
auroc_df[idx].sort_values('rdwpc_auroc', ascending = False).head()
###Output
_____no_output_____
###Markdown
Visualization Sandbox
###Code
%matplotlib inline
import seaborn
seaborn.jointplot(transformed_df['pdwpc_CpiwPeGgaDso>D'], transformed_df['rdwpc_CpiwPeGgaDso>D'], alpha = 0.1);
seaborn.jointplot(transformed_df['pdwpc_CpiwPeGgaD'], transformed_df['rdwpc_CpiwPeGgaD'], alpha = 0.1);
seaborn.jointplot(auroc_df['dwpc_auroc'], auroc_df['pdwpc_auroc'], alpha = 0.1);
seaborn.jointplot(auroc_df['delta_auroc'], auroc_df['rdwpc_auroc'], alpha = 0.1);
###Output
_____no_output_____
###Markdown
Visualization Sandbox
###Code
%matplotlib inline
import seaborn
seaborn.jointplot(transformed_df['pdwpc_CrCrCbGaD'], transformed_df['rdwpc_CrCrCbGaD'], alpha = 0.1);
seaborn.jointplot(transformed_df['pdwpc_CbGpPWpGaD'], transformed_df['rdwpc_CbGpPWpGaD'], alpha = 0.1);
seaborn.jointplot(auroc_df['dwpc_auroc'], auroc_df['pdwpc_auroc'], alpha = 0.1);
seaborn.jointplot(auroc_df['delta_auroc'], auroc_df['rdwpc_auroc'], alpha = 0.1);
###Output
_____no_output_____ |
Youtube_Transcript_Summarizer.ipynb | ###Markdown
###Code
!pip install -q transformers
!pip install -q youtube_transcript_api
from transformers import pipeline
from youtube_transcript_api import YouTubeTranscriptApi
youtube_video="https://www.youtube.com/watch?v=UF8uR6Z6KLc"
video_id=youtube_video.split("=")[1]
video_id
from IPython.display import YouTubeVideo
YouTubeVideo(video_id)
YouTubeTranscriptApi.get_transcript(video_id)
transcript=YouTubeTranscriptApi.get_transcript(video_id)
transcript[0:5]
result = " "
for i in transcript:
result += ' ' + i['text']
#print(result)
print(len(result))
summarizer=pipeline('summarization')
num_iters = int(len(result)/1000)
summarized_text=[]
for i in range(0, num_iters+1):
start=0
start=i*1000
end=(i+1)*1000
out=summarizer(result[start:end])
out=out[0]
out=out['summary_text']
summarized_text.append(out)
print(summarized_text)
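# Added sketch: merge the per-chunk summaries into a single string
# (plain concatenation; the fixed 1000-character chunks may split sentences).
full_summary = ' '.join(summarized_text)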
###Output
Your max_length is set to 142, but you input_length is only 31. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)
|
03_tail/inertial_sim/inertial_Brownian_motion.ipynb | ###Markdown
Inertial Brownian motion simulationThe Inertial Langevin equation for a particle of mass $m$ and some damping $\gamma$ writes:\begin{equation}m\ddot{x} = -\gamma \dot{x} + \sqrt{2k_\mathrm{B}T \gamma} \mathrm{d}B_t\end{equation}Integrating the latter equation using the Euler method, one can replace $\dot{x}$ by:\begin{equation}\dot{x} \simeq \frac{x_i - x_{i-1}}{\tau} ~,\end{equation}$\ddot{x}$ by:\begin{equation} \begin{aligned} \ddot{x} &\simeq \frac{ \frac{x_i - x_{i-1}}{\tau} - \frac{x_{i-1} - x_{i-2}}{\tau} } {\tau} \\ & = \frac{x_i - 2x_{i - 1} + x_{i-2}}{\tau^2} ~. \end{aligned}\end{equation}and finally, $\mathrm{d}B_t$ by a Gaussian random number $w_i$ with a zero mean value and a $\tau$ variance, on can write $x_i$ as:\begin{equation} x_i = \frac{2 + \tau /\tau_\mathrm{B}}{1 + \tau / \tau_\mathrm{B} } x_{i-1} - \frac{1}{1 + \tau / \tau_\mathrm{B}}x_{i-2} + \frac{\sqrt{2k_\mathrm{B}T\gamma}}{m(1 + \tau/\tau_\mathrm{B})} \tau w_i ~,\end{equation}In the following, we use Python to simulate such a movement and check the properties of the mean squared displacement. Then, I propose a Cython implementation that permits a $200$x speed improvement on the simulation.
###Code
# Import important libraries
import numpy as np
import matplotlib.pyplot as plt
# Just some matplotlib tweaks
import matplotlib as mpl
mpl.rcParams["xtick.direction"] = "in"
mpl.rcParams["ytick.direction"] = "in"
mpl.rcParams["lines.markeredgecolor"] = "k"
mpl.rcParams["lines.markeredgewidth"] = 1.5
mpl.rcParams["figure.dpi"] = 200
from matplotlib import rc
rc("font", family="serif")
rc("text", usetex=True)
rc("xtick", labelsize="medium")
rc("ytick", labelsize="medium")
rc("axes", labelsize="large")
def cm2inch(value):
return value / 2.54
N = 1000000 # number of time steps
tau = 0.01 # simulation time step
m = 1e-8 # particle mass
a = 1e-6 # radius of the particle
eta = 0.001 # viscosity (here water)
gamma = 6 * np.pi * eta * a
kbT = 4e-21
tauB = m / gamma
###Output
_____no_output_____
###Markdown
With such properties we have a characteristic diffusion time $\tau_\mathrm{B} =0.53$ s.
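As a quick check with the values defined above, $\tau_\mathrm{B} = m/\gamma = 10^{-8}/(6\pi \times 10^{-3} \times 10^{-6}) \simeq 0.53$ s.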
###Code
def xi(xi1, xi2):
"""
Function that compute the position of a particle using the full Langevin Equation
"""
t = tau / tauB
wi = np.random.normal(0, np.sqrt(tau))
return (
(2 + t) / (1 + t) * xi1
- 1 / (1 + t) * xi2
+ np.sqrt(2 * kbT * gamma) / (m * (1 + t)) * np.power(tau,1) * wi
)
def trajectory(N):
"""
Function generating a trajectory of length N.
"""
x = np.zeros(N)
for i in range(2, len(x)):
x[i] = xi(x[i - 1], x[i - 2])
return x
###Output
_____no_output_____
###Markdown
Now that the functions are set up, one can generate a trajectory of length $N$ by simply calling the function ```trajectory()```
###Code
# Generate a trajectory of 10e6 points.
x = trajectory(1000000)
plt.plot(np.arange(len(x))*tau, x)
plt.title("Intertial Brownian trajectory")
plt.ylabel("$x$ (m)")
plt.xlabel("$t$ (s)")
plt.show()
###Output
_____no_output_____
###Markdown
Cross checking We now check that the simulated trajectory gives us the correct MSD properties, to ensure the simulation is done properly. The MSD is given by:\begin{equation}\mathrm{MSD}(\Delta t) = \left. \langle \left( x(t) - x(t+\Delta t) \right)^2 \rangle \right|_t ~,\end{equation}with $\Delta t$ a lag time. The MSD can be computed using the function defined in the cell below. For a lag time $\Delta t \ll \tau_B$ we should have:\begin{equation}\mathrm{MSD}(\Delta t) = \frac{k_\mathrm{B}T}{m} \Delta t ^2 ~,\end{equation}and for $\Delta t \gg \tau_B$:\begin{equation}\mathrm{MSD}(\Delta t) = 2 D \Delta t~,\end{equation}with $D = k_\mathrm{B}T / (6 \pi \eta a)$.
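For the parameter values used above, the long-time diffusion coefficient is $D = k_\mathrm{B}T/(6 \pi \eta a) = 4\times 10^{-21}/(6\pi \times 10^{-3} \times 10^{-6}) \simeq 2.1\times 10^{-13}~\mathrm{m^2/s}$.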
###Code
t = np.array([*np.arange(3,10,1), *np.arange(10,100,10), *np.arange(100,1000,100), *np.arange(1000,8000,1000)])
def msd(x, Dt):
    """Return the MSD of a trajectory x for a list of lag-time indices Dt"""
    _msd = lambda x, t: np.mean((x[:-t] - x[t:])**2)
    return [_msd(x, i) for i in Dt]
MSD = msd(x,t)
D = kbT/(6*np.pi*eta*a)
t_plot = t*tau
plt.loglog(t*tau,MSD, "o")
plt.plot(t*tau, (2*D*t_plot), "--", color = "k", label="long time theory")
plt.plot(t*tau, kbT/m * t_plot**2, ":", color = "k", label="short time theory")
plt.ylabel("MSD (m$^2$)")
plt.xlabel("$\Delta t$ (s)")
horiz_data = [1e-8, 1e-17]
t_horiz = [tauB, tauB]
plt.plot(t_horiz, horiz_data, "k", label="$\\tau_\mathrm{B}$")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
The simulation gives the expected results. However, on the computer used, about 6 seconds are needed to generate this trajectory. If one wants to look at fine effects and needs to generate millions of trajectories, this is too long. To speed up the process, in the following I use Cython to generate the trajectory using C code. Cython acceleration
###Code
# Loading Cython library
%load_ext Cython
###Output
_____no_output_____
###Markdown
We now write the same functions as in the first part of the appendix. However, we now indicate the type of each variable.
###Code
%%cython
import cython
cimport numpy as np
import numpy as np
from libc.math cimport sqrt
ctypedef np.float64_t dtype_t
cdef int N = 1000000 # length of the simulation
cdef dtype_t tau = 0.01 # simulation time step
cdef dtype_t m = 1e-8 # particle mass
cdef dtype_t a = 1e-6 # radius of the particle
cdef dtype_t eta = 0.001 # viscosity (here water)
cdef dtype_t gamma = 6 * 3.14 * eta * a
cdef dtype_t kbT = 4e-21
cdef dtype_t tauB = m/gamma
cdef dtype_t[:] x = np.zeros(N)
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef dtype_t xi_cython( dtype_t xi1, dtype_t xi2, dtype_t wi):
cdef dtype_t t = tau / tauB
return (
(2 + t) / (1 + t) * xi1
- 1 / (1 + t) * xi2
+ sqrt(2 * kbT * gamma) / (m * (1 + t)) * tau * wi
)
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
cdef dtype_t[:] _traj(dtype_t[:] x, dtype_t[:] wi):
cdef int i
for i in range(2, N):
x[i] = xi_cython(x[i-1], x[i-2], wi[i])
return x
def trajectory_cython():
cdef dtype_t[:] wi = np.random.normal(0, np.sqrt(tau), N).astype('float64')
return _traj(x, wi)
%timeit trajectory(1000000)
%timeit trajectory_cython()
###Output
30.6 ms ± 495 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
Again, we check that the trajectory generated with Cython gives the correct MSD
###Code
x=np.asarray(trajectory_cython())
D = kbT/(6*np.pi*eta*a)
t_plot = t*tau
plt.loglog(t*tau,MSD, "o")
plt.plot(t*tau, (2*D*t_plot), "--", color = "k", label="long time theory")
plt.plot(t*tau, kbT/m * t_plot**2, ":", color = "k", label="short time theory")
horiz_data = [1e-8, 1e-17]
t_horiz = [tauB, tauB]
plt.plot(t_horiz, horiz_data, "k", label="$\\tau_\mathrm{B}$")
plt.xlabel("$\\Delta t$ (s)")
plt.ylabel("MSD (m$^2$)")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
ConclusionFinally, one only needs $\simeq 30$ ms to generate the trajectory instead of $\simeq 7$ s, which is a $\simeq 250\times$ speed improvement. The simulation is here bound by the time needed to generate the array of random numbers, which is still done using the NumPy function. After further checking, NumPy random generation is as optimized as one could make it, so there is no benefit in cythonizing the random generation. For the sake of completeness, one can find below a Cython version to generate random numbers, found thanks to Senderle on [Stackoverflow](https://stackoverflow.com/questions/42767816/what-is-the-most-efficient-and-portable-way-to-generate-gaussian-random-numbers). Taking that into account, the speed improvement on the actual computation of the trajectory **without** the random number generation is $\simeq 1100\times$.
###Code
%%cython
from libc.stdlib cimport rand, RAND_MAX
from libc.math cimport log, sqrt
import numpy as np
import cython
cdef double random_uniform():
cdef double r = rand()
return r / RAND_MAX
cdef double random_gaussian():
cdef double x1, x2, w
w = 2.0
while (w >= 1.0):
x1 = 2.0 * random_uniform() - 1.0
x2 = 2.0 * random_uniform() - 1.0
w = x1 * x1 + x2 * x2
w = ((-2.0 * log(w)) / w) ** 0.5
return x1 * w
@cython.boundscheck(False)
cdef void assign_random_gaussian_pair(double[:] out, int assign_ix):
cdef double x1, x2, w
w = 2.0
while (w >= 1.0):
x1 = 2.0 * random_uniform() - 1.0
x2 = 2.0 * random_uniform() - 1.0
w = x1 * x1 + x2 * x2
w = sqrt((-2.0 * log(w)) / w)
out[assign_ix] = x1 * w
out[assign_ix + 1] = x2 * w
@cython.boundscheck(False)
def my_uniform(int n):
cdef int i
cdef double[:] result = np.zeros(n, dtype='f8', order='C')
for i in range(n):
result[i] = random_uniform()
return result
@cython.boundscheck(False)
def my_gaussian(int n):
cdef int i
cdef double[:] result = np.zeros(n, dtype='f8', order='C')
for i in range(n):
result[i] = random_gaussian()
return result
@cython.boundscheck(False)
def my_gaussian_fast(int n):
cdef int i
cdef double[:] result = np.zeros(n, dtype='f8', order='C')
for i in range(n // 2): # Int division ensures trailing index if n is odd.
assign_random_gaussian_pair(result, i * 2)
if n % 2 == 1:
result[n - 1] = random_gaussian()
return result
%timeit my_gaussian_fast(1000000)
%timeit np.random.normal(0,1,1000000)
###Output
26.4 ms ± 1.87 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
One can thus see that even a pure C implementation can be slower than the NumPy one, thanks to NumPy's heavy optimization.
###Code
fig = plt.figure(figsize = (cm2inch(16), cm2inch(10)))
gs = fig.add_gridspec(2, 1)
f_ax1 = fig.add_subplot(gs[0, 0])
for i in range(100):
x = np.asarray(trajectory_cython())* 1e6
plt.plot(np.arange(N)*tau / 60, x)
plt.ylabel("$x$ ($\mathrm{\mu m}$)")
plt.xlabel("$t$ (min)")
plt.text(5,100, "a)")
plt.xlim([0,160])
f_ax1 = fig.add_subplot(gs[1, 0])
x=np.asarray(trajectory_cython())
D = kbT/(6*np.pi*eta*a)
plt.loglog(t*tau,MSD, "o")
t_plot = np.linspace(0.5e-2,5e3,1000)
plt.plot(t_plot, (2*D*t_plot), "--", color = "k", label="long time theory")
plt.plot(t_plot, kbT/m * t_plot**2, ":", color = "k", label="short time theory")
horiz_data = [1e-7, 1e-18]
t_horiz = [tauB, tauB]
plt.plot(t_horiz, horiz_data, "k", label="$\\tau_\mathrm{B}$")
plt.ylabel("MSD (m$^2$)")
plt.xlabel("$\\Delta t$ (s)")
ax = plt.gca()
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
plt.legend(frameon=False)
plt.text(0.7e2,1e-15, "b)")
plt.xlim([0.8e-2,1e2])
plt.ylim([1e-16,1e-10])
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.savefig("intertial_langevin.pdf")
plt.show()
###Output
_____no_output_____ |
Machine Learning/Natural Language Processing/Document Classification.ipynb | ###Markdown
Document Classification **Import needed libraries**
###Code
import numpy as np
import pandas as pd
import spacy
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
###Output
_____no_output_____
###Markdown
**Classification**
###Code
# Read in the locally saved file from the link above
df_yelp = pd.read_csv('data/yelp_labelled.txt', names=['sentence', 'label'], sep='\t')
df_yelp.head()
###Output
_____no_output_____
###Markdown
**Split dataset** - done before vectorization to avoid leaking data
###Code
# Create the feature and target variables
sentences = df_yelp['sentence']
y = df_yelp['label']
# Train-test split
sentences_train, sentences_test, y_train, y_test = train_test_split(
sentences, y, test_size=0.25, random_state=42)
###Output
_____no_output_____
###Markdown
**Vectorizing**
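As a small aside (added sketch): with `ngram_range=(2, 2)` the vocabulary consists of word bigrams only, so each column of the resulting sparse matrix corresponds to a two-word phrase.
###Code
# Added sketch: tf-idf on a toy corpus with a bigram-only vocabulary
toy_sentences = ["the food was great", "the service was slow"]
toy_vectorizer = TfidfVectorizer(ngram_range=(2, 2))
X_toy = toy_vectorizer.fit_transform(toy_sentences)  # one row per sentence, one column per distinct bigram
###Output
_____no_output_____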
###Code
# Instantiate and fit the tf-idf vectorizer
vectorizer = TfidfVectorizer(stop_words='english', ngram_range = (2,2))
vectorizer.fit(sentences_train)
# Vectorize the training and testing data
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
# Display the properties of the vectorized text
X_train
###Output
_____no_output_____
###Markdown
**Baseline classification score**
###Code
# Instantiate and fit a model
classifier = LogisticRegression(solver='lbfgs')
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
print("Accuracy:", score)
###Output
Accuracy: 0.588
###Markdown
**Creating a two-step Pipeline for cross-fold validation**
- Vectorizer
- Classifier
###Code
# Define the Pipeline
pipe = Pipeline([('vect', vectorizer), # vectorizer
('clf', classifier) # classifier
])
# Define the parameter space for the grid search
parameters = {'clf__C': [1, 10, 1000000]} # C: inverse of regularization strength
grid_search = GridSearchCV(pipe, parameters, cv=5, n_jobs=-1, verbose=1)
grid_search.fit(sentences, y);
# Print out the best score
grid_search.best_score_
###Output
Fitting 5 folds for each of 3 candidates, totalling 15 fits
###Markdown
**Pipeline with random forest**
###Code
# Instantiate and fit a model
classifier = RandomForestClassifier()
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
print("Accuracy:", score)
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 10, stop = 100, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 50, num = 10)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Define the Pipeline
pipe = Pipeline([('vect', vectorizer), # vectorizer
('clf', classifier) # classifier
])
# Define the parameter space for the grid search
# you need the 'clf' name followed by two __ then the parameter 'n_estimators'
parameters = {'clf__n_estimators': n_estimators,
'clf__max_features': max_features,
'clf__max_depth': max_depth,
'clf__min_samples_split': min_samples_split,
'clf__min_samples_leaf': min_samples_leaf,
'clf__bootstrap': bootstrap}
grid_search = GridSearchCV(pipe, parameters, cv=5, n_jobs=-1, verbose=1)
grid_search.fit(sentences, y);
# Print out the best score
grid_search.best_score_
###Output
Fitting 5 folds for each of 40 candidates, totalling 200 fits
###Markdown
**Singular Value Decomposition (SVD)**
###Code
df_yelp = pd.read_csv('data/yelp_labelled.txt', names=['sentence', 'label'], sep='\t')
df_yelp.head()
# Create the features and target
sentences = df_yelp['sentence']
y = df_yelp['label']
# Instantiate the tf-idf vectorizer
vectorizer = TfidfVectorizer(stop_words='english', ngram_range = (2,2))
# Instantiate the classifier (defaults)
classifier = LogisticRegression(solver='lbfgs')
# Instantiate the LSA (SVD) algorithm (defaults)
svd = TruncatedSVD()
# LSA part
lsa = Pipeline([('vect', vectorizer), ('svd', svd)])
# Combine into one pipeline
pipe = Pipeline([('lsa', lsa), ('clf', classifier)])
# Define the parameter space for the grid search
parameters = {
'lsa__svd__n_components': (100,250),
'lsa__vect__max_df': (0.9, 1.0), # max document frequency
}
grid_search = GridSearchCV(pipe, parameters, cv=5, n_jobs=-1, verbose=1)
grid_search.fit(sentences, y);
# Display the best score from the grid-search
grid_search.best_score_
###Output
Fitting 5 folds for each of 4 candidates, totalling 20 fits
###Markdown
**SVD with amazon dataset**
###Code
df_yelp = pd.read_csv('data/amazon_cells_labelled.txt', names=['sentence', 'label'], sep='\t')
df_yelp.head()
# Create the features and target
sentences = df_yelp['sentence']
y = df_yelp['label']
# Instantiate the tf-idf vectorizer
vectorizer = TfidfVectorizer(stop_words='english', ngram_range = (2,2))
# Instantiate the classifier (defaults)
classifier = LogisticRegression(solver='lbfgs')
# Instantiate the LSA (SVD) algorithm (defaults)
svd = TruncatedSVD()
# LSA part
lsa = Pipeline([('vect', vectorizer), ('svd', svd)])
# Combine into one pipeline
pipe = Pipeline([('lsa', lsa), ('clf', classifier)])
# Define the parameter space for the grid search
parameters = {
'lsa__svd__n_components': (100,250),
'lsa__vect__max_df': (0.9, 1.0), # max document frequency
}
grid_search = GridSearchCV(pipe, parameters, cv=5, n_jobs=-1, verbose=1)
grid_search.fit(sentences, y);
# Display the best score from the grid-search
grid_search.best_score_
###Output
Fitting 5 folds for each of 4 candidates, totalling 20 fits
###Markdown
**Pipeline spacy word embeddings**
###Code
nlp = spacy.load("en_core_web_lg")
# Read in the locally saved file from UCI website
df_yelp = pd.read_csv('data/yelp_labelled.txt', names=['sentence', 'label'], sep='\t')
df_yelp.head()
# Create the features and target
sentences = df_yelp['sentence']
y = df_yelp['label']
# Train-test split
sentences_train, sentences_test, y_train, y_test = train_test_split(
sentences, y, test_size=0.25, random_state=42)
# Function to return the vector for each sentence in a document
def get_word_vectors(docs):
return [nlp(doc).vector for doc in docs]
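# Quick check (added sketch): spaCy's Doc.vector is the mean of the token vectors,
# and for the en_core_web_lg model loaded above each vector is 300-dimensional.
example_shape = nlp("The food was great").vector.shape  # -> (300,)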
# Get the vectors for each sentence (mean of all the word vectors)
X_train = get_word_vectors(sentences_train)
X_test = get_word_vectors(sentences_test)
# Instantiate the classifier (defaults)
classifier = LogisticRegression(solver='lbfgs')
# Fit the model
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
# Print out the accuracy score
print("Accuracy including word embeddings: ", score)
###Output
Accuracy including word embeddings: 0.856
###Markdown
**amazon data**
###Code
nlp = spacy.load("en_core_web_lg")
# Read in the locally saved file from UCI website
df_yelp = pd.read_csv('data/amazon_cells_labelled.txt', names=['sentence', 'label'], sep='\t')
df_yelp.head()
# Create the features and target
sentences = df_yelp['sentence']
y = df_yelp['label']
# Train-test split
sentences_train, sentences_test, y_train, y_test = train_test_split(
sentences, y, test_size=0.25, random_state=42)
# Function to return the vector for each sentence in a document
def get_word_vectors(docs):
return [nlp(doc).vector for doc in docs]
# Get the vectors for each sentence (mean of all the word vectors)
X_train = get_word_vectors(sentences_train)
X_test = get_word_vectors(sentences_test)
# Instantiate the classifier (defaults)
classifier = LogisticRegression(solver='lbfgs')
# Fit the model
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
# Print out the accuracy score
print("Accuracy including word embeddings: ", score)
###Output
Accuracy including word embeddings: 0.86
|
notebooks/1.7-BrunoGomesCoelho_Colab_with_temperature_after_experiment.ipynb | ###Markdown
###Code
# Colab console code
"""
function ClickConnect(){
console.log("Working");
document.querySelector("colab-toolbar-button#connect").click()
}setInterval(ClickConnect,60000)
"""
import time
start_time = time.time()
COLAB_IDX = 1
TEMPERATURE = "t03"
TESTING = False
COLAB = True
if COLAB:
BASE_DIR = "/content/drive/My Drive/IC/mosquito-networking/"
else:
BASE_DIR = "../"
from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.append("/content/drive/My Drive/IC/mosquito-networking/")
!python3 -m pip install -qr "/content/drive/My Drive/IC/mosquito-networking/drive_requirements.txt"
###Output
[K |████████████████████████████████| 2.2MB 3.4MB/s
[K |████████████████████████████████| 71kB 24.7MB/s
[K |████████████████████████████████| 122kB 49.7MB/s
[K |████████████████████████████████| 51kB 20.9MB/s
[K |████████████████████████████████| 5.7MB 30.1MB/s
[K |████████████████████████████████| 61kB 21.3MB/s
[K |████████████████████████████████| 51kB 19.7MB/s
[?25h Building wheel for autopep8 (setup.py) ... [?25l[?25hdone
Building wheel for jupyter-tensorboard (setup.py) ... [?25l[?25hdone
###Markdown
- - - Trying out a full pytorch experiment, with tensorboard, // processing, etc
###Code
# OPTIONAL: Load the "autoreload" extension so that code can change
#%load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
#%autoreload 2
import numpy as np
import pandas as pd
from src.data import make_dataset
from src.data import read_dataset
from src.data import util
from src.data.colab_dataset import MosquitoDataTemperature
import joblib
from torchsummary import summary
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
###Output
_____no_output_____
###Markdown
Experiment params
###Code
# Parameters
params = {'batch_size': 20,
'shuffle': True,
'num_workers': 0}
max_epochs = 1
if TESTING:
params["num_workers"] = 0
version = !python3 --version
version = version[0].split(".")[1]
if int(version) < 7 and params["num_workers"]:
print("WARNING\n"*10)
print("Parallel execution only works for python3.7 or above!")
print("Running in parallel with other versions is not guaranted to work")
print("See https://discuss.pytorch.org/t/valueerror-signal-number-32-out-of-range-when-loading-data-with-num-worker-0/39615/2")
## Load gpu or cpu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device {device}")
###Output
Using device cuda:0
###Markdown
load data
###Code
# Load scaler
#scaler = joblib.load("../data/interim/scaler.pkl")
scaler = joblib.load(BASE_DIR + f"data/interim/scaler_{TEMPERATURE}.pkl")
data = np.load(BASE_DIR + f"data/interim/all_wavs_{TEMPERATURE}.npz",
allow_pickle=True)
# Parse and divide data
train_data = data["train"]
val_data = data["val"]
test_data = data["test"]
x_train = train_data[:, :-1]
y_train = train_data[:, -1]
x_val = val_data[:, :-1]
y_val = val_data[:, -1]
x_test = test_data[:, :-1]
y_test = test_data[:, -1]
# Generators
training_set = MosquitoDataTemperature(x_train, y_train,
device=device, scaler=scaler, roll=0.1)
training_generator = torch.utils.data.DataLoader(training_set, **params,
pin_memory=True)
val_set = MosquitoDataTemperature(x_val, y_val,
device=device, scaler=scaler)
val_generator = torch.utils.data.DataLoader(val_set, **params,
pin_memory=True)
test_set = MosquitoDataTemperature(x_test, y_test,
device=device, scaler=scaler)
test_generator = torch.utils.data.DataLoader(test_set, **params,
pin_memory=True)
# Generate some example data
temp_generator = torch.utils.data.DataLoader(training_set, **params)
for (local_batch, local_labels) in temp_generator:
example_x = local_batch
example_y = local_labels
break
###Output
_____no_output_____
###Markdown
Load model
###Code
from src.models.BasicMosquitoNet2 import BasicMosquitoNet
# create your optimizer
net = BasicMosquitoNet()
#net.load_state_dict(torch.load(BASE_DIR +
#f"runs/colab/{COLAB_IDX-1}/model_epoch_90.pt"))
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
if device.type == "cuda":
net.cuda()
summary(net, input_size=example_x.shape[1:])
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv1d-1 [-1, 64, 7309] 8,256
BatchNorm1d-2 [-1, 64, 7309] 128
Conv1d-3 [-1, 32, 3527] 262,176
BatchNorm1d-4 [-1, 32, 3527] 64
Linear-5 [-1, 1] 56,417
================================================================
Total params: 327,041
Trainable params: 327,041
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.08
Forward/backward pass size (MB): 8.86
Params size (MB): 1.25
Estimated Total Size (MB): 10.19
----------------------------------------------------------------
###Markdown
Start tensorboard
###Code
from torch.utils.tensorboard import SummaryWriter
save_path = BASE_DIR + f"runs/colab/{TEMPERATURE}/{COLAB_IDX}/"
# default `log_dir` is "runs" - we'll be more specific here
writer = SummaryWriter(save_path, max_queue=3)
###Output
_____no_output_____
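###Markdown
To inspect these logs while training, the TensorBoard notebook extension can be used (a sketch; it assumes the extension shipped with recent TensorBoard versions is available in the Colab runtime):
###Code
# Added sketch: point the notebook TensorBoard extension at the log directory used above
%load_ext tensorboard
%tensorboard --logdir "/content/drive/My Drive/IC/mosquito-networking/runs/colab"
###Output
_____no_output_____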
###Markdown
train function
###Code
# Simple train function
def train(net, optimizer, max_epochs, testing=False, testing_idx=0,
save_idx=1, save_path=""):
# Loop over epochs
last_test_loss = 0
for epoch in range(max_epochs):
# Training
cumulative_train_loss = 0
cumulative_train_acc = 0
amount_train_samples = 0
for idx, (local_batch, local_labels) in enumerate(training_generator):
amount_train_samples += len(local_batch)
local_batch, local_labels = util.convert_cuda(local_batch,
local_labels,
device)
optimizer.zero_grad() # zero the gradient buffers
output = net(local_batch)
# Stores loss
pred = output >= 0.5
cumulative_train_acc += pred.float().eq(local_labels).sum().data.item()
loss = criterion(output, local_labels)
cumulative_train_loss += loss.data.item()
loss.backward()
optimizer.step() # Does the update
if testing and idx == testing_idx:
break
cumulative_train_loss /= (idx+1)
cumulative_train_acc /= amount_train_samples
writer.add_scalar("Train Loss", cumulative_train_loss, epoch)
writer.add_scalar("Train Acc", cumulative_train_acc, epoch)
# Validation
with torch.set_grad_enabled(False):
for name, generator in zip(["Val", "Test"],
[val_generator, test_generator]):
cumulative_test_loss = 0
cumulative_test_acc = 0
amount_test_samples = 0
for idx, (local_batch, local_labels) in enumerate(generator):
amount_test_samples += len(local_batch)
local_batch, local_labels = util.convert_cuda(local_batch,
local_labels,
device)
output = net(local_batch)
loss = criterion(output, local_labels)
cumulative_test_loss += loss.data.item()
# Stores loss
pred = output >= 0.5
cumulative_test_acc += pred.float().eq(local_labels).sum().data.item()
if testing:
break
cumulative_test_loss /= (idx+1)
cumulative_test_acc /= amount_test_samples
writer.add_scalar(f"{name} Loss", cumulative_test_loss, epoch)
writer.add_scalar(f"{name} Acc", cumulative_test_acc, epoch)
torch.save(net.state_dict(), save_path + f"model_epoch_{epoch}.pt")
writer.close()
return cumulative_test_loss
%%time
train(net, optimizer, 2000, testing=TESTING, save_path=save_path)
print(time.time() - start_time)
#!pip install line_profiler
#%load_ext line_profiler
#%lprun -f train train(net, optimizer, 1, testing=False, save_path=save_path)
###Output
_____no_output_____ |
camilo_torres_botero/Ejercicios 1.2 Weak Ties & Random Networks.ipynb | ###Markdown
Weak Ties & Random Networks ExercisesBasic network exercises Clustering Coefficient ExerciseCompute the clustering coefficient for each node and for the whole network (undirected)
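As a worked example with the graph defined below: node 4 has neighbors $\{2, 5, 6, 7\}$ ($k=4$) and there are $L=2$ links among them, $(2,5)$ and $(5,6)$, so $C_4 = \frac{2L}{k(k-1)} = \frac{2 \cdot 2}{4 \cdot 3} = \frac{1}{3} \approx 0.33$, which matches the output of both implementations below.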
###Code
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
edges = set([(1,2),(2,1),(2,3), (2,4), (2,5), (4,5), (4,6), (5,6), (4,7)])
nodes = set()
for x,y in edges:
nodes.add(x)
nodes.add(y)
print(nodes)
def get_vecinos(nodo):
vecinos = set()
for s,t in edges:
if s == nodo:
vecinos.add(t)
if t == nodo:
vecinos.add(s)
return vecinos
def get_l_k_node(nodo):
vecinos_nodo = get_vecinos(nodo)
k = len(vecinos_nodo)
l = 0
for s in vecinos_nodo:
for t in vecinos_nodo:
if (s,t) in edges:
l += 1
return l,k
def get_clustering_coefficient(nodo):
l,k = get_l_k_node(nodo)
combinatoria = k*(k-1)
if combinatoria != 0:
c_nodo = (2*l)/(k*(k-1))
else:
c_nodo = 0
return c_nodo
print("CALCULO DEL COEFICIENTE DE CLUSTERING SIN LA LIBRERIA:")
print(" ")
for x in nodes:
clustering_coefficient_x = get_clustering_coefficient(x)
print("Coeficiente de clustering de ", x, ":", clustering_coefficient_x)
print(" ")
print("CALCULO DEL COEFICIENTE DE CLUSTERING CON LA LIBRERIA NETWORKX:")
print(" ")
G=nx.Graph()
G.add_edges_from(edges)
print(nx.clustering(G))
nx.draw_networkx(G)
###Output
{1, 2, 3, 4, 5, 6, 7}
CALCULO DEL COEFICIENTE DE CLUSTERING SIN LA LIBRERIA:
Coeficiente de clustering de 1 : 0
Coeficiente de clustering de 2 : 0.16666666666666666
Coeficiente de clustering de 3 : 0
Coeficiente de clustering de 4 : 0.3333333333333333
Coeficiente de clustering de 5 : 0.6666666666666666
Coeficiente de clustering de 6 : 1.0
Coeficiente de clustering de 7 : 0
CALCULO DEL COEFICIENTE DE CLUSTERING CON LA LIBRERIA NETWORKX:
{1: 0.0, 2: 0.16666666666666666, 4: 0.3333333333333333, 7: 0.0, 6: 1.0, 5: 0.6666666666666666, 3: 0.0}
|
05.06_Bin_Counting.ipynb | ###Markdown
- [Click-through ad data from Kaggle competition](https://www.kaggle.com/c/avazu-ctr-prediction/data)
- train_subset is first 10K rows of 6+GB set
###Code
# df = pd.read_csv(folder_data + '/train_subset.csv')
df = pd.read_csv(folder_data + '/train')
print(df.shape)
df.head(3)
# how many features should we have after?
len(df['device_id'].unique())
df.filter(items= [bin_column, 'click']).drop_duplicates()[:10]
###Output
_____no_output_____
###Markdown
Features are $\theta$ = [$N^+$, $N^-$, $\log(N^+)-\log(N^-)$, isRest]$N^+$ = $p(+)$ = $n^+/(n^+ + n^-)$$N^-$ = $p(-)$ = $n^-/(n^+ + n^-)$$\log(N^+)-\log(N^-)$ = $\log\frac{p(+)}{p(-)}$isRest = back-off bin (not shown here)
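For example (added illustration): a category seen in $n^+ = 3$ clicked and $n^- = 7$ non-clicked rows gets $N^+ = 3/10 = 0.3$, $N^- = 7/10 = 0.7$, a ratio $N^+/N^- \approx 0.43$, and log-odds $\log(0.3/0.7) \approx -0.85$.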
###Code
def click_counting(x, bin_column):
clicks = pd.Series(x[x['click'] > 0][bin_column].value_counts(), name='clicks')
no_clicks = pd.Series(x[x['click'] < 1][bin_column].value_counts(), name='no_clicks')
counts = pd.DataFrame([clicks,no_clicks]).T.fillna('0')
counts['total'] = counts['clicks'].astype('int64') + counts['no_clicks'].astype('int64')
return counts
def bin_counting(counts):
counts['N+'] = counts['clicks'].astype('int64').divide(counts['total'].astype('int64'))
counts['N-'] = counts['no_clicks'].astype('int64').divide(counts['total'].astype('int64'))
    counts['log_N+'] = counts['N+'].divide(counts['N-'])  # note: stores the raw ratio N+/N-; take a log of this column for the log-odds feature
# If we wanted to only return bin-counting properties, we would filter here
bin_counts = counts.filter(items= ['N+', 'N-', 'log_N+'])
return counts, bin_counts
# bin counts example: device_id
bin_column = 'device_id'
device_clicks = click_counting(df.filter(items= [bin_column, 'click']), bin_column)
device_all, device_bin_counts = bin_counting(device_clicks)
# check to make sure we have all the devices
print(len(device_bin_counts))
device_bin_counts[:10]
device_all.sort_values(by = 'total', ascending=False).head(4)
# We can see how this can change model evaluation time by comparing raw vs. bin-counting size
from sys import getsizeof
print('Our pandas Series, in bytes: ', getsizeof(df.filter(items= ['device_id', 'click'])))
print('Our bin-counting feature, in bytes: ', getsizeof(device_bin_counts))
###Output
Our pandas Series, in bytes: 2951314751
Our bin-counting feature, in bytes: 239090344
###Markdown
- [Click-through ad data from Kaggle competition](https://www.kaggle.com/c/avazu-ctr-prediction/data)
- train_subset is first 10K rows of 6+GB set
###Code
df = pd.read_csv('data/train_subset.csv')
df.head(3)
# how many features should we have after?
len(df['device_id'].unique())
###Output
_____no_output_____
###Markdown
Features are $\theta$ = [$N^+$, $N^-$, $\log(N^+)-\log(N^-)$, isRest]$N^+$ = $p(+)$ = $n^+/(n^+ + n^-)$$N^-$ = $p(-)$ = $n^-/(n^+ + n^-)$$\log(N^+)-\log(N^-)$ = $\log\frac{p(+)}{p(-)}$isRest = back-off bin (not shown here)
###Code
def click_counting(x, bin_column):
clicks = pd.Series(x[x['click'] > 0][bin_column].value_counts(), name='clicks')
no_clicks = pd.Series(x[x['click'] < 1][bin_column].value_counts(), name='no_clicks')
counts = pd.DataFrame([clicks,no_clicks]).T.fillna('0')
counts['total'] = counts['clicks'].astype('int64') + counts['no_clicks'].astype('int64')
return counts
def bin_counting(counts):
counts['N+'] = counts['clicks'].astype('int64').divide(counts['total'].astype('int64'))
counts['N-'] = counts['no_clicks'].astype('int64').divide(counts['total'].astype('int64'))
    counts['log_N+'] = counts['N+'].divide(counts['N-'])  # note: stores the raw ratio N+/N-; take a log of this column for the log-odds feature
# If we wanted to only return bin-counting properties, we would filter here
bin_counts = counts.filter(items= ['N+', 'N-', 'log_N+'])
return counts, bin_counts
# bin counts example: device_id
bin_column = 'device_id'
device_clicks = click_counting(df.filter(items= [bin_column, 'click']), bin_column)
device_all, device_bin_counts = bin_counting(device_clicks)
# check to make sure we have all the devices
len(device_bin_counts)
device_all.sort_values(by = 'total', ascending=False).head(4)
# We can see how this can change model evaluation time by comparing raw vs. bin-counting size
from sys import getsizeof
print('Our pandas Series, in bytes: ', getsizeof(df.filter(items= ['device_id', 'click'])))
print('Our bin-counting feature, in bytes: ', getsizeof(device_bin_counts))
###Output
Our pandas Series, in bytes: 7300031
Our bin-counting feature, in bytes: 525697
|
SageMath code/upstates-ForcedBalance stability-with Iext.ipynb | ###Markdown
Up states: Stability of Forced-Balance learning rule Soldado-Magraner, Laje & Buonomano (2020) Author: Rodrigo Laje (With constant input current $I_{ext}$ in order to have a non-paradoxical fixed point.) Neural subsystem Load script: (RUN FIRST `upstates-Neural subsystem stability-with Iext.ipynb`)
###Code
%%capture
load('upstates-Neural subsystem stability-with Iext.sage.py')
###Output
_____no_output_____
###Markdown
Synaptic subsystem: Forced-Balance rule Forced-Balance rule equations Weights $W_{EE}$ and $W_{IE}$ converge to the Up state (if stable), while $W_{EI}$ and $W_{II}$ are non-instantaneous followers (i.e. they decay to the corresponding line attractor)
###Code
var('dWEEdt,dWIEdt')
var('alpha_1,alpha_3')
f_WEE = dWEEdt == alpha_1*g_E*E*(E_set-E)
f_WIE = dWIEdt == -alpha_3*g_I*E*(I_set-I)
show(f_WEE)
show(f_WIE)
###Output
_____no_output_____
###Markdown
The other two weights are (non-instantaneous) followers of $W_{EE}$ and $W_{IE}$:
###Code
WEI_ff,WII_ff = solve([E.subs(E_up)==E_set,I.subs(I_up)==I_set],W_EI,W_II)[0]
show(WEI_ff)
show(WII_ff)
var('dWEIdt,dWIIdt,tau_ff')
f_WEI = dWEIdt == (W_EI.subs(WEI_ff) - W_EI)/tau_ff
f_WII = dWIIdt == (W_II.subs(WII_ff) - W_II)/tau_ff
###Output
_____no_output_____
###Markdown
Full synaptic subsystem:
###Code
show(f_WEE)
show(f_WEI)
show(f_WIE)
show(f_WII)
synapticFixedPoint = solve([f_WEE.subs(dWEEdt==0),f_WEI.subs(dWEIdt==0),f_WIE.subs(dWIEdt==0),f_WII.subs(dWIIdt==0)],E,I,W_EI,W_II)
synapticFixedPoint_up = synapticFixedPoint[1]
###Output
_____no_output_____
###Markdown
Two solutions but only one of them is compatible with suprathreshold activity: Fixed point: Up state $E=E_{set}$, $I=I_{set}$ ($W_{EE}$ and $W_{IE}$ can take any value)
###Code
show(synapticFixedPoint_up)
###Output
_____no_output_____
###Markdown
Quasi-steady-state approximation
1. Full system neural+synaptic
2. Nondimensionalization
3. Normalization
4. Approximation (fast neural subsystem)

Full system
###Code
show(f_E)
show(f_I)
show(f_WEE)
show(f_WEI)
show(f_WIE)
show(f_WII)
###Output
_____no_output_____
###Markdown
Nondimensionalization Definition of nondimensional variables Define new (nondimensional) variables $e$, $i$, etc. and scaling parameters $E_0$, $I_0$, etc: $e = E/E_0$ $i = I/I_0$ $\tau = t/\tau_0$ $w_{EE} = W_{EE}/W_{EE0}$ ... etc
###Code
var('e,i')
var('E_0,I_0,tau_0')
var('w_EE,w_EI,w_IE,w_II')
var('W_EE0,W_EI0,W_IE0,W_II0')
var('dedtau,didtau')
var('dwEEdtau,dwEIdtau,dwIEdtau,dwIIdtau');
###Output
_____no_output_____
###Markdown
Replacement rules:
###Code
nondim = [E==E_0*e,dEdt==(E_0/tau_0)*dedtau,I==I_0*i,dIdt==(I_0/tau_0)*didtau,W_EE==W_EE0*w_EE,W_EI==W_EI0*w_EI,W_IE==W_IE0*w_IE,W_II==W_II0*w_II,dWEEdt==(W_EE0/tau_0)*dwEEdtau,dWEIdt==(W_EI0/tau_0)*dwEIdtau,dWIEdt==(W_IE0/tau_0)*dwIEdtau,dWIIdt==(W_II0/tau_0)*dwIIdtau]
show(nondim)
###Output
_____no_output_____
###Markdown
Full system after replacement:
###Code
f_e = f_E.subs(nondim)*tau_E/E_0
f_i = f_I.subs(nondim)*tau_I/I_0
f_wEE = f_WEE.subs(nondim)*tau_0/W_EE0
f_wEI = f_WEI.subs(nondim)*tau_0/W_EI0
f_wIE = f_WIE.subs(nondim)*tau_0/W_IE0
f_wII = f_WII.subs(nondim)*tau_0/W_II0
show(f_e.expand())
show(f_i.expand())
show(f_wEE.expand())
show(f_wEI.expand())
show(f_wIE.expand())
show(f_wII.expand())
###Output
_____no_output_____
###Markdown
Normalization of equations Choose scaling parameters such that nondimensional variables are of order 1: $\tau_0 = 1/(\alpha_1 g_E g_I E_{set} I_{set})$ $\tau_{ff} = \tau_0$ $E_0 = E_{set}$ $I_0 = I_{set}$ $W_{EE0} = R/g_I$ $W_{EI0} = R/g_E$ $W_{IE0} = 1/(\alpha g_E)$ $W_{II0} = 1/g_I$ Define new parameters: $\epsilon_E = \tau_E/\tau_0$ $\epsilon_I = \tau_I/\tau_0$ $R = E_{set}/I_{set}$ $\alpha = \alpha_1/\alpha_3$ $g = g_I/g_E$ $\theta_E = (g_E/E_{set}) \Theta_E$ $\theta_I = (g_I/I_{set}) \Theta_I$ $c_{ext} = (g_E/E_{set})C_{ext}$
###Code
var('epsilon_E,epsilon_I')
var('theta_E,theta_I,R,g,alpha,i_ext')
normaliz = [tau_0==1/(alpha_1*g_E*g_I*E_set*I_set),tau_ff==tau_0,tau_E==epsilon_E*tau_0,tau_I==epsilon_I*tau_0,E_0==E_set,I_0==I_set,W_EE0==R/g_I,W_EI0==R/g_E,W_IE0==1/(alpha*g_E),W_II0==1/g_I,I_set==E_set/R,Theta_E==theta_E*E_set/g_E,Theta_I==theta_I*I_set/g_I,g_E==g*g_I,alpha_1==alpha*alpha_3,I_ext==(E_set/g_E)*i_ext]
show(normaliz)
###Output
_____no_output_____
###Markdown
Replacement rules: numerical values, parameter definitions, nondimensionalized weights
###Code
parameters = [alpha==alpha_1/alpha_3,tau_ff==tau_0,epsilon_E==tau_E/tau_0,epsilon_I==tau_I/tau_0,g==g_E/g_I,R==E_set/I_set,theta_E==(g_E/E_set)*Theta_E,theta_I==(g_I/I_set)*Theta_I,i_ext==(g_E/E_set)*I_ext,W_EE0==R/g_I,W_EI0==R/g_E,W_IE0==1/(alpha*g_E),W_II0==1/g_I]
weights = [w_EE==W_EE/W_EE0,w_IE==W_IE/W_IE0,w_EI==W_EI/W_EI0,w_II==W_II/W_II0]
###Output
_____no_output_____
###Markdown
Nondimensionalized, normalized equations
###Code
f_e_norm = f_e.subs(normaliz).subs(normaliz).subs(normaliz).expand()
f_i_norm = f_i.subs(normaliz).subs(normaliz).subs(normaliz).expand()
f_wEE_norm = f_wEE.subs(normaliz).subs(normaliz).subs(normaliz).expand().factor()
f_wEI_norm = f_wEI.subs(normaliz).subs(normaliz).subs(normaliz).expand()
f_wIE_norm = f_wIE.subs(normaliz).subs(normaliz).expand().factor()
f_wII_norm = f_wII.subs(normaliz).subs(normaliz).subs(normaliz).expand()
show(f_e_norm)
show(f_i_norm)
show(f_wEE_norm)
show(f_wEI_norm)
show(f_wIE_norm)
show(f_wII_norm)
###Output
_____no_output_____
###Markdown
Approximation: Quasi-steady-state solutions for the neural subsystem Neural dynamics is much faster than synaptic dynamics, thus we assume E and I are in "equilibrium" (~instantaneous convergence to quasi-steady-state values, as if the weights were fixed) while the synaptic weights evolve according to their slow dynamics. That is, we consider $\tau_E \ll \tau_0$ and $\tau_I \ll \tau_0$ thus $\epsilon_E \ll 1$ and $\epsilon_E \ll 1$ thus $\epsilon_E\frac{de}{d\tau} \sim 0$ and $\epsilon_I\frac{di}{d\tau} \sim 0$:
###Code
neuralFixedPoint_nondim = solve([f_e_norm.subs(dedtau==0),f_i_norm.subs(didtau==0)],e,i)
e_ss = neuralFixedPoint_nondim[0][0]
i_ss = neuralFixedPoint_nondim[0][1]
show(e_ss)
show(i_ss)
###Output
_____no_output_____
###Markdown
Stability of the Up state Jacobian of the nondimensionalized synaptic dynamics in the QSS approximation Let $\frac{dw_{EE}}{d\tau} = f_1(w_{EE},w_{IE})$ $\frac{dw_{IE}}{d\tau} = f_2(w_{EE},w_{IE})$ then the Jacobian matrix is$J_{qss} = \begin{pmatrix}\frac{\partial f_1}{\partial w_{EE}} & \frac{\partial f_1}{\partial w_{IE}} \\\frac{\partial f_2}{\partial w_{EE}} & \frac{\partial f_2}{\partial w_{IE}}\end{pmatrix}$ Stability analysis: we should evaluate $J_{qss}$ at the QSS fixed point and compute its eigenvalues. If at least one eigenvalue has positive real part, then the fixed point is unstable. Jacobian matrix The Jacobian matrix $J$ is the matrix of all the first-order partial derivatives of the learning rule with respect to the weights. In terms of the QSS synaptic system: $\displaystyle \frac{dw_{EE}}{d \tau} = f_1(w_{EE},w_{IE})$ $\displaystyle \frac{dw_{IE}}{d \tau} = f_2(w_{EE},w_{IE})$ then \begin{array}{lll}J_{11} = \displaystyle \frac{\partial f_1}{\partial w_{EE}} & J_{12} = \displaystyle \frac{\partial f_1}{\partial w_{IE}} \\J_{21} = \displaystyle \frac{\partial f_2}{\partial w_{EE}} & J_{22} = \displaystyle \frac{\partial f_2}{\partial w_{IE}}\end{array} Instead of substituting $e = e_{ss}(w_{EE},w_{EI},w_{IE},w_{II})$ and $i = i_{ss}(w_{EE},w_{EI},w_{IE},w_{II})$ directly into $f_1(e,i), \ldots$ before differentiating, we will use the chain rule: Actual computation using the chain rule The learning rule is expressed as a composition of functions $e$ and $i$ of the weights: $\displaystyle \frac{dw_{EE}}{d \tau} = f_1(e,i)$ $\displaystyle \frac{dw_{EI}}{d \tau} = f_2(e,i)$ Using the chain rule, the Jacobian matrix elements can be computed as $J_{11} = \displaystyle \frac{\partial f_1}{\partial w_{EE}} = \frac{\partial f_1}{\partial e} \frac{\partial e}{\partial w_{EE}} + \frac{\partial f_1}{\partial i} \frac{\partial i}{\partial w_{EE}}$ $J_{12} = \displaystyle \frac{\partial f_1}{\partial w_{IE}} = \frac{\partial f_1}{\partial e} \frac{\partial e}{\partial w_{IE}} + \frac{\partial f_1}{\partial i} \frac{\partial i}{\partial w_{IE}}$ $J_{21} = \displaystyle \frac{\partial f_2}{\partial w_{EE}} = \frac{\partial f_2}{\partial e} \frac{\partial e}{\partial w_{EE}} + \frac{\partial f_2}{\partial i} \frac{\partial i}{\partial w_{EE}}$ $J_{22} = \displaystyle \frac{\partial f_2}{\partial w_{IE}} = \frac{\partial f_2}{\partial e} \frac{\partial e}{\partial w_{IE}} + \frac{\partial f_2}{\partial i} \frac{\partial i}{\partial w_{IE}}$ Here we consider $w_{EI}$ and $w_{II}$ as functions of $w_{EE}$ and $w_{IE}$: $e = e_{ss}(w_{EE},w_{EI}(w_{EE},w_{IE}),w_{IE},w_{II}(w_{EE},w_{IE}))$ $i = i_{ss}(w_{EE},w_{EI}(w_{EE},w_{IE}),w_{IE},w_{II}(w_{EE},w_{IE}))$ If the learning rule includes explicit dependence on the weights (as in this case) we need a more general expression: $\frac{dw_{EE}}{d \tau} = f_1(e,i,w_{EE},w_{EI},\ldots)$ $\frac{dw_{EI}}{d \tau} = f_2(e,i,w_{EE},w_{EI},\ldots)$ ... 
etc first substitute $w_{EE} \rightarrow p_{EE}$ $w_{EI} \rightarrow p_{EI}, \ldots$ to get $\displaystyle \frac{dw_{EE}}{d \tau} = f_1(e,i,p_{EE},p_{EI},\ldots)$ $\displaystyle \frac{dw_{EI}}{d \tau} = f_2(e,i,p_{EE},p_{EI},\ldots)$ and then take the partial derivatives: $J_{11} = \displaystyle \frac{\partial f_1}{\partial w_{EE}} = \frac{\partial f_1}{\partial e} \frac{\partial e}{\partial w_{EE}} + \frac{\partial f_1}{\partial i} \frac{\partial i}{\partial w_{EE}} + \frac{\partial f_1}{\partial p_{EE}}$ (because $\frac{\partial p_{EE}}{\partial w_{EE}}=1$ and $\frac{\partial p_{EI}}{\partial w_{EE}}=0 \ldots$) $J_{12} = \displaystyle \frac{\partial f_1}{\partial w_{EI}} = \frac{\partial f_1}{\partial e} \frac{\partial e}{\partial w_{EI}} + \frac{\partial f_1}{\partial i} \frac{\partial i}{\partial w_{EI}} + \frac{\partial f_1}{\partial p_{EI}}$ (because $\frac{\partial p_{EE}}{\partial w_{EI}}=0$ and $\frac{\partial p_{EI}}{\partial w_{EI}}=1 \ldots$) ...etc
###Code
var('dedwEE_ph,didwEE_ph') #placeholders
var('dedwEI_ph,didwEI_ph')
var('dedwIE_ph,didwIE_ph')
var('dedwII_ph,didwII_ph')
J_11 = diff(dwEEdtau.subs(f_wEE_norm),e)*dedwEE_ph + diff(dwEEdtau.subs(f_wEE_norm),i)*didwEE_ph + diff(dwEEdtau.subs(f_wEE_norm),w_EE)
J_12 = diff(dwEEdtau.subs(f_wEE_norm),e)*dedwEI_ph + diff(dwEEdtau.subs(f_wEE_norm),i)*didwEI_ph + diff(dwEEdtau.subs(f_wEE_norm),w_EI)
J_13 = diff(dwEEdtau.subs(f_wEE_norm),e)*dedwIE_ph + diff(dwEEdtau.subs(f_wEE_norm),i)*didwIE_ph + diff(dwEEdtau.subs(f_wEE_norm),w_IE)
J_14 = diff(dwEEdtau.subs(f_wEE_norm),e)*dedwII_ph + diff(dwEEdtau.subs(f_wEE_norm),i)*didwII_ph + diff(dwEEdtau.subs(f_wEE_norm),w_II)
J_21 = diff(dwEIdtau.subs(f_wEI_norm),e)*dedwEE_ph + diff(dwEIdtau.subs(f_wEI_norm),i)*didwEE_ph + diff(dwEIdtau.subs(f_wEI_norm),w_EE)
J_22 = diff(dwEIdtau.subs(f_wEI_norm),e)*dedwEI_ph + diff(dwEIdtau.subs(f_wEI_norm),i)*didwEI_ph + diff(dwEIdtau.subs(f_wEI_norm),w_EI)
J_23 = diff(dwEIdtau.subs(f_wEI_norm),e)*dedwIE_ph + diff(dwEIdtau.subs(f_wEI_norm),i)*didwIE_ph + diff(dwEIdtau.subs(f_wEI_norm),w_IE)
J_24 = diff(dwEIdtau.subs(f_wEI_norm),e)*dedwII_ph + diff(dwEIdtau.subs(f_wEI_norm),i)*didwII_ph + diff(dwEIdtau.subs(f_wEI_norm),w_II)
J_31 = diff(dwIEdtau.subs(f_wIE_norm),e)*dedwEE_ph + diff(dwIEdtau.subs(f_wIE_norm),i)*didwEE_ph + diff(dwIEdtau.subs(f_wIE_norm),w_EE)
J_32 = diff(dwIEdtau.subs(f_wIE_norm),e)*dedwEI_ph + diff(dwIEdtau.subs(f_wIE_norm),i)*didwEI_ph + diff(dwIEdtau.subs(f_wIE_norm),w_EI)
J_33 = diff(dwIEdtau.subs(f_wIE_norm),e)*dedwIE_ph + diff(dwIEdtau.subs(f_wIE_norm),i)*didwIE_ph + diff(dwIEdtau.subs(f_wIE_norm),w_IE)
J_34 = diff(dwIEdtau.subs(f_wIE_norm),e)*dedwII_ph + diff(dwIEdtau.subs(f_wIE_norm),i)*didwII_ph + diff(dwIEdtau.subs(f_wIE_norm),w_II)
J_41 = diff(dwIIdtau.subs(f_wII_norm),e)*dedwEE_ph + diff(dwIIdtau.subs(f_wII_norm),i)*didwEE_ph + diff(dwIIdtau.subs(f_wII_norm),w_EE)
J_42 = diff(dwIIdtau.subs(f_wII_norm),e)*dedwEI_ph + diff(dwIIdtau.subs(f_wII_norm),i)*didwEI_ph + diff(dwIIdtau.subs(f_wII_norm),w_EI)
J_43 = diff(dwIIdtau.subs(f_wII_norm),e)*dedwIE_ph + diff(dwIIdtau.subs(f_wII_norm),i)*didwIE_ph + diff(dwIIdtau.subs(f_wII_norm),w_IE)
J_44 = diff(dwIIdtau.subs(f_wII_norm),e)*dedwII_ph + diff(dwIIdtau.subs(f_wII_norm),i)*didwII_ph + diff(dwIIdtau.subs(f_wII_norm),w_II)
J = matrix([[J_11,J_12,J_13,J_14],[J_21,J_22,J_23,J_24],[J_31,J_32,J_33,J_34],[J_41,J_42,J_43,J_44]])
show(J)
###Output
_____no_output_____
###Markdown
In order to compute the eigenvalues of the Jacobian matrix we need:
- the (quasi)steady-state values $e=e_{ss}(w_{EE},w_{EI},w_{IE},w_{II})$ and $i=i_{ss}(w_{EE},w_{EI},w_{IE},w_{II})$
- the follower functions $w_{EI} = w_{EIff}(w_{EE},w_{IE})$ and $w_{II} = w_{IIff}(w_{EE},w_{IE})$
- the partial derivatives $\displaystyle \frac{de}{dw_{EE}} \ldots$
- the fixed point values $w_{EEup}, w_{EIup},\ldots$

Quasi-steady-state values of $e$ and $i$
###Code
show(e_ss)
show(i_ss)
###Output
_____no_output_____
###Markdown
Partial derivatives
###Code
dedwEE = diff(e.subs(e_ss),w_EE).factor()
dedwEI = diff(e.subs(e_ss),w_EI).factor()
dedwIE = diff(e.subs(e_ss),w_IE).factor()
dedwII = diff(e.subs(e_ss),w_II).factor()
didwEE = diff(i.subs(i_ss),w_EE).factor()
didwEI = diff(i.subs(i_ss),w_EI).factor()
didwIE = diff(i.subs(i_ss),w_IE).factor()
didwII = diff(i.subs(i_ss),w_II).factor()
dxdw = [dedwEE_ph==dedwEE,dedwEI_ph==dedwEI,dedwIE_ph==dedwIE,dedwII_ph==dedwII,didwEE_ph==didwEE,didwEI_ph==didwEI,didwIE_ph==didwIE,didwII_ph==didwII]
###Output
_____no_output_____
###Markdown
Fixed points Fixed points are defined by $\frac{d w_{EE}}{d\tau}=0$, etc. 1. Fixed points without substitution: (i.e. in terms of $e$ and $i$)
###Code
synapticFixedPoint_qss_nosubs = solve([f_wEE_norm.subs(dwEEdtau==0),f_wEI_norm.subs(dwEIdtau==0),f_wIE_norm.subs(dwIEdtau==0),f_wII_norm.subs(dwIIdtau==0)],e,i,w_EI,w_II)
synapticFixedPoint_qss_nosubs_up = synapticFixedPoint_qss_nosubs[1]
show(synapticFixedPoint_qss_nosubs_up)
###Output
_____no_output_____
###Markdown
2. Fixed points with substitution (i.e. explicit values of the weights). Solve for the weight values at the Up state:
###Code
synapticFixedPoint_qss_subs = solve([e_ss.subs(e==1),i_ss.subs(i==1)],w_EI,w_II)
synapticFixedPoint_qss_subs_up = synapticFixedPoint_qss_subs[0]
show(synapticFixedPoint_qss_subs_up)
###Output
_____no_output_____
###Markdown
Interpretation of the above expressions: any combination of values of $w_{EE}$ and $w_{IE}$ is a steady state of the learning rule (stability to be proved yet). Confirm obtained solution:
###Code
show(e_ss.subs(synapticFixedPoint_qss_subs_up).full_simplify())
show(i_ss.subs(synapticFixedPoint_qss_subs_up).full_simplify())
###Output
_____no_output_____
###Markdown
Fixed-point values of the original weights at the Up state:
###Code
synapticFixedPoint_qss_subs_up_orig = [W_EIup,W_IIup]
show(synapticFixedPoint_qss_subs_up_orig)
###Output
_____no_output_____
###Markdown
Evaluation of the Jacobian matrix at the Up state fixed point The Jacobian matrix evaluated at the Up state:
###Code
J_up = J.subs(synapticFixedPoint_qss_nosubs_up).subs(dxdw).subs(synapticFixedPoint_qss_subs_up).simplify()
show(J_up)
J_up.eigenvalues()
###Output
_____no_output_____
###Markdown
Recall that the steady-state solutions of the learning rule form a 2D plane in 4D phase space, and thus two out of the four eigenvalues are expected to be zero (meaning there is no dynamics along the plane). If both nonzero eigenvalues have negative real part then the learning rule is stable; if any of them has positive real part, then the rule is unstable. Linear stability of the directions with nonzero eigenvalues Take the nonzero eigenvalues and check whether any of them has positive real part:
###Code
J_up_eigvalues = J_up.eigenvalues()
J_up_eigvalues1_aux = J_up_eigvalues[0].factor()
J_up_eigvalues2_aux = J_up_eigvalues[1].factor()
show(J_up_eigvalues1_aux)
show(J_up_eigvalues2_aux)
###Output
_____no_output_____
###Markdown
Rewrite eigenvalues Define $A,B,C$ and write the eigenvalues in terms of them: (this section might depend on the particular implementation of Sage, as specific terms and factors are extracted from the eigenvalue expression)
###Code
A = ((sum([J_up_eigvalues1_aux.numerator().operands()[k] for k in [0..6,8]]))/R).expand() # all terms in the numerator but the square root
B = J_up_eigvalues1_aux.numerator().operands()[7].operands()[0].operands()[0] # argument of the square root with factor R
C = (J_up_eigvalues1_aux.denominator()/R).expand() # denominator
show(A)
show(B)
show(C)
###Output
_____no_output_____
###Markdown
(I've defined $A$ and $C$ as divided by $R$ to help Sage simplify the following expressions more easily; see below)
###Code
BA2diff = (B - A^2).expand().factor()
show(BA2diff)
###Output
_____no_output_____
###Markdown
Note that $D$ is positive:
###Code
D = (BA2diff/C - (2*A + C)).full_simplify()
show(D)
###Output
_____no_output_____
###Markdown
Define $F = 2A + C + D$:
###Code
F = (2*A + C + D).expand()
show(F)
###Output
_____no_output_____
###Markdown
The eigenvalues can now be expressed as: $$\lambda_{\pm} = \frac{A \pm \sqrt{A^2 + FC}}{C} \hspace{1cm} \mbox{(J_up_eigvalue_build)}$$ Confirm that the difference between J_up_eigvalue and J_up_eigvalue_build is zero:
###Code
J_up_eigvalues1_build = (A - sqrt((A^2 + F*C)))/C
(J_up_eigvalues1_build - J_up_eigvalues1_aux).full_simplify()
###Output
_____no_output_____
###Markdown
Note also that $C$ must be positive because it is related to one of the stability conditions for the neural subsystem. First express $C$ in terms of the original parameters:
###Code
C_orig = (C.subs(weights).subs(parameters).subs(parameters)).expand()
show(C_orig)
###Output
_____no_output_____
###Markdown
Now evaluate the first stability condition for the neural subsystem at the Up state:
###Code
show(neural_stable_detcond_v2)
neural_stable_detcond_aux = (neural_stable_detcond_v2*(2*g_E*alpha_1/(E_set*alpha_3))).expand() # multiply by positive factors only to preserve the inequality
show(neural_stable_detcond_aux)
###Output
_____no_output_____
###Markdown
Note that the left-hand side of the last expression is equal to $C$:
###Code
show(neural_stable_detcond_aux.lhs() - C_orig)
###Output
_____no_output_____
###Markdown
Analytic expression for the stability condition of the rule Both eigenvalues have negative real part if $F<0$ and $A<0$: if $F < 0$ then $A^2 + FC < A^2$ and hence $\sqrt{A^2 + FC} < |A|$, and if in addition $A<0$ then $A \pm \sqrt{A^2 + FC} < 0$. 1. $F<0$
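As a worked version of this sign argument (assuming $C>0$, as established above; if instead $A^2+FC<0$ the eigenvalues are complex conjugates with real part $A/C<0$, which is also stable): $$ F<0 \;\Rightarrow\; A^2+FC<A^2 \;\Rightarrow\; \sqrt{A^2+FC}<|A|, \qquad A<0 \;\Rightarrow\; \lambda_{\pm}=\frac{A\pm\sqrt{A^2+FC}}{C}<\frac{A+|A|}{C}=0. $$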
###Code
aux = solve(synapticFixedPoint_qss_subs_up[1],w_IE)
F_v2 = F.subs(aux).subs(weights).subs(parameters).subs(parameters).expand()
synaptic_stable_cond1 = ((F*E_set*I_set*alpha_3*g_I).expand().subs(weights).subs(parameters).subs(parameters) < 0) # multiply by positive factors only
synaptic_stable_cond1_v2 = ((F_v2 < 0)*E_set^2*I_set*g_I^2*alpha_3/2).expand() # multiply by positive factors only
synaptic_stable_cond1_v4 = ((Theta_E - I_ext)*I_set*Theta_I*alpha_1*g_E*g_I + E_set^3*alpha_3)*g_E*g_I + (W_II*g_I + 1)*((Theta_E - I_ext)*I_set^2*alpha_1*g_E^2*g_I - E_set^2*I_set*alpha_1*g_E^2) < (W_EE*g_E - 1)*(E_set*I_set*Theta_I*alpha_1*g_E*g_I^2 + E_set^2*I_set*alpha_3*g_I^2)
show(synaptic_stable_cond1)
show(synaptic_stable_cond1_v2)
show(synaptic_stable_cond1_v4)
# confirm
show((synaptic_stable_cond1_v4.lhs()-synaptic_stable_cond1_v4.rhs()-synaptic_stable_cond1_v2.lhs()).expand())
###Output
_____no_output_____
###Markdown
The condition can be expressed as: $a_1 + b_1(W_{II}g_I+1) < b_1^\prime(W_{EE}g_E-1)$ where $a_1 = (I_{set} (\Theta_E - I_{ext}) \Theta_I \alpha_1 g_E g_I + E_{set}^3 \alpha_3)g_E g_I$ $b_1 = I_{set}^2 (\Theta_E - I_{ext}) \alpha_1 g_E^2 g_I - E_{set}^2 I_{set} \alpha_1 g_E^2$ $b_1^\prime = E_{set} I_{set} \Theta_I \alpha_1 g_E g_I^2 + E_{set}^2 I_{set} \alpha_3 g_I^2$ It is hard to determine whether it is satisfied or not in the general case: the trace condition for the stability of the neural subsystem states that $(W_{II} g_I + 1)\tau_E > (W_{EE}g_E - 1)\tau_I$, but on the other hand it seems likely that $b_1<b_1^\prime$, and in addition the left-hand side has an additional positive term $a_1$. If $I_{ext}$ is large enough then the condition seems easier to satisfy (i.e. the learning rule is stable) because the left-hand side is smaller. 2. $A<0$
###Code
A_orig = A.subs(aux).subs(weights).subs(parameters).subs(parameters).expand()
synaptic_stable_cond2 = ((A*E_set*I_set*alpha_3*g_I).expand().subs(weights).subs(parameters).subs(parameters) < 0) # multiply by positive factors only
synaptic_stable_cond2_v2 = ((A_orig < 0)*E_set^2*g_I^2*alpha_3).expand() # multiply by positive factors only
synaptic_stable_cond2_v3 = 2*(Theta_E - I_ext)*Theta_I*alpha_1*g_E^2*g_I^2 + (W_II*g_I + 1)*(2*I_set*(Theta_E - I_ext)*alpha_1*g_E^2*g_I - E_set^2*alpha_1*g_E^2) < (W_EE*g_E - 1)*(2*E_set*Theta_I*alpha_1*g_E*g_I^2 + E_set^2*alpha_3*g_I^2)
show(A)
show(A_orig)
show(synaptic_stable_cond2)
show(synaptic_stable_cond2_v2)
show(synaptic_stable_cond2_v3)
# confirm
show((synaptic_stable_cond2_v3.lhs()-synaptic_stable_cond2_v3.rhs()-synaptic_stable_cond2_v2.lhs()).expand())
###Output
_____no_output_____
###Markdown
The condition can be expressed as: $a_2 + b_2(W_{II}g_I+1) < b_2^\prime(W_{EE}g_E-1)$ where $a_2 = 2 (\Theta_E - I_{ext}) \Theta_I \alpha_1 g_E^2 g_I^2$ $b_2 = 2 I_{set} (\Theta_E - I_{ext}) \alpha_1 g_E^2 g_I - E_{set}^2 \alpha_1 g_E^2$ $b_2^\prime = 2 E_{set} \Theta_I \alpha_1 g_E g_I^2 + E_{set}^2 \alpha_3 g_I^2$ Similar analysis as before, the condition is likely satisfied for biologically backed parameter values but it is hard to decide it in the general case. Numerical analysis Stability and instability conditions in terms of the free weights $W_{EE}$ and $W_{IE}$
###Code
show(neural_stable_detcond_v2)
show(neural_stable_trcond_v2)
show(paradox_cond)
show(up_exist_cond_2_v2)
show(positive_WEI_cond)
show(positive_WII_cond)
show(synaptic_stable_cond1)
show(synaptic_stable_cond2)
###Output
_____no_output_____
###Markdown
Paradoxical conditions
###Code
W_EEup = W_EE==5
W_IEup = W_IE==10
alphas = [alpha_1==0.002,alpha_3==0.002]
upstate_orig = [W_EEup,
W_EI==W_EI.subs(synapticFixedPoint_qss_subs_up_orig).subs(values_paradoxical).subs(W_EEup),
W_IEup,
W_II==W_II.subs(synapticFixedPoint_qss_subs_up_orig).subs(values_paradoxical).subs(W_IEup)]
upstate_norm = [w_EE==w_EE.subs(weights).subs(parameters).subs(parameters).subs(values_paradoxical).subs(upstate_orig),
w_EI==w_EI.subs(weights).subs(parameters).subs(parameters).subs(values_paradoxical).subs(upstate_orig),
w_IE==w_IE.subs(weights).subs(parameters).subs(parameters).subs(values_paradoxical).subs(upstate_orig),
w_II==w_II.subs(weights).subs(parameters).subs(parameters).subs(values_paradoxical).subs(upstate_orig)]
show(upstate_orig)
show(upstate_norm)
probe = [W_EE==5,W_IE==10]
positive_WEI_cond_border_pdx = solve(positive_WEI_cond.lhs()==positive_WEI_cond.rhs(),W_EE)[0].subs(values_paradoxical)
positive_WII_cond_border_pdx = solve(positive_WII_cond.lhs()==positive_WII_cond.rhs(),W_IE)[0].subs(values_paradoxical)
paradox_cond_border_pdx = solve(paradox_cond.left()==paradox_cond.right(),W_EE,W_IE)[0][0].subs(values_paradoxical)
neural_stable_detcond_v2_border_pdx = solve(neural_stable_detcond_v2.lhs()==neural_stable_detcond_v2.rhs(),W_IE)[0].subs(values_paradoxical)
neural_stable_trcond_v2_border_pdx = solve(neural_stable_trcond_v2.lhs()==neural_stable_trcond_v2.rhs(),W_IE)[0].subs(values_paradoxical)
synaptic_stable_cond1_border_pdx = solve(synaptic_stable_cond1.lhs()==synaptic_stable_cond1.rhs(),W_IE)[0].subs(values_paradoxical).subs(alphas)
synaptic_stable_cond2_border_pdx = solve(synaptic_stable_cond2.lhs()==synaptic_stable_cond2.rhs(),W_IE)[0].subs(values_paradoxical).subs(alphas)
if (I_ext-Theta_E).subs(values_paradoxical) > 0:
up_exist_cond_2_v2_border_pdx = solve(up_exist_cond_2_v2_pos.lhs()==up_exist_cond_2_v2_pos.rhs(),W_IE)[0].subs(values_paradoxical)
else:
up_exist_cond_2_v2_border_pdx = solve(up_exist_cond_2_v2_neg.lhs()==up_exist_cond_2_v2_neg.rhs(),W_IE)[0].subs(values_paradoxical)
print("PARADOXICAL CONDITIONS")
print("positive_WEI_cond:")
print(" ",positive_WEI_cond)
print(" border: ",positive_WEI_cond_border_pdx)
print(" probe: ",bool(positive_WEI_cond.subs(values_paradoxical).subs(probe)))
print("positive_WII_cond:")
print(" ",positive_WII_cond)
print(" border: ",positive_WII_cond_border_pdx)
print(" probe: ",bool(positive_WII_cond.subs(values_paradoxical).subs(probe)))
print("paradox_cond:")
print(" ",paradox_cond)
print(" border: ",paradox_cond_border_pdx)
print(" probe: ",bool(paradox_cond.subs(values_paradoxical).subs(probe)))
print("neural_stable_detcond_v2:")
print(" ",neural_stable_detcond_v2)
print(" border: ",neural_stable_detcond_v2_border_pdx)
print(" probe: ",bool(neural_stable_detcond_v2.subs(values_paradoxical).subs(probe)))
print("neural_stable_trcond_v2:")
print(" ",neural_stable_trcond_v2)
print(" border: ",neural_stable_trcond_v2_border_pdx)
print(" probe: ",bool(neural_stable_trcond_v2.subs(values_paradoxical).subs(probe)))
print("synaptic_stable_cond1:")
print(" ",synaptic_stable_cond1)
print(" border: ",synaptic_stable_cond1_border_pdx)
print(" probe: ",bool(synaptic_stable_cond1.subs(values_paradoxical).subs(alphas).subs(probe)))
print("synaptic_stable_cond2:")
print(" ",synaptic_stable_cond2)
print(" border: ",synaptic_stable_cond2_border_pdx)
print(" probe: ",bool(synaptic_stable_cond2.subs(values_paradoxical).subs(alphas).subs(probe)))
if (I_ext-Theta_E).subs(values_paradoxical) > 0:
print("up_exist_cond_2_v2_pos:")
print(" ",up_exist_cond_2_v2_pos)
print(" border: ",up_exist_cond_2_v2_border_pdx)
print(" probe: ",bool(up_exist_cond_2_v2_pos.subs(values_paradoxical).subs(probe)))
else:
print("up_exist_cond_2_v2_neg:")
print(" ",up_exist_cond_2_v2_neg)
print(" border: ",up_exist_cond_2_v2_border_pdx)
print(" probe: ",bool(up_exist_cond_2_v2_neg.subs(values_paradoxical).subs(probe)))
W_EE_max = 10
fig1 = line([[W_EE.subs(positive_WEI_cond_border_pdx),0],[W_EE.subs(positive_WEI_cond_border_pdx),50]],color='blue',linestyle='-',legend_label='positive WEI')
fig2 = line([[0,W_IE.subs(positive_WII_cond_border_pdx)],[W_EE_max,W_IE.subs(positive_WII_cond_border_pdx)]],color='blue',linestyle='--',legend_label='positive WEI')
fig3 = line([[W_EE.subs(paradox_cond_border_pdx),0],[W_EE.subs(paradox_cond_border_pdx),50]],color='magenta',legend_label='paradoxical')
fig4 = plot(W_IE.subs(neural_stable_detcond_v2_border_pdx),(W_EE,0,W_EE_max),ymin=0,color='green',linestyle='-',legend_label='neural detcond')
fig5 = plot(W_IE.subs(neural_stable_trcond_v2_border_pdx),(W_EE,0,W_EE_max),ymin=0,color='green',linestyle='--',legend_label='neural trcond')
#fig6 = plot(W_IE.subs(up_exist_cond_2_v2_border_pdx),(W_EE,0,W_EE_max),ymin=0,color='black',linestyle='--',legend_label='up exists')
fig7 = plot(W_IE.subs(synaptic_stable_cond1_border_pdx),(W_EE,0,W_EE_max),ymin=0,color='black',linestyle='-',legend_label='synaptic stable 1')
fig8 = plot(W_IE.subs(synaptic_stable_cond2_border_pdx),(W_EE,0,W_EE_max),ymin=0,color='black',linestyle='--',legend_label='synaptic stable 2')
tt1 = text('(positive\nWEI)', (0.1+W_EE.subs(positive_WEI_cond_border_pdx),18),color='blue',horizontal_alignment='left')
tt2 = text('(positive WII)', (2.5,0.5+W_IE.subs(positive_WII_cond_border_pdx)),color='blue',horizontal_alignment='left')
tt3 = text('(paradoxical)', (0.1+W_EE.subs(paradox_cond_border_pdx),20),color='magenta',horizontal_alignment='left')
tt4 = text('(detcond\nstable)', (4,14),color='green',horizontal_alignment='left')
tt5 = text('(trcond\nstable)', (7,7.5),color='green',horizontal_alignment='left')
#tt6 = text('(up exists)', (3.1,10),color='black',horizontal_alignment='left')
tt7 = text('(synaptic\nstable 1)', (4.6,20),color='black',horizontal_alignment='left')
tt8 = text('(synaptic\nstable 2)', (4.3,17.5),color='black',horizontal_alignment='left')
#fig = fig1 + fig2 + fig3 + fig4 + fig5 + fig6 + fig7 + fig8 + tt1 + tt2 + tt3 + tt4 + tt5 + tt6 + tt7 + tt8
fig = fig1 + fig2 + fig3 + fig4 + fig5 + fig7 + fig8 + tt1 + tt2 + tt3 + tt4 + tt5 + tt7 + tt8
fig.xmin(0)
fig.xmax(W_EE_max)
fig.ymin(0)
fig.ymax(20)
fig.axes_labels(['$W_{EE}$', '$W_{IE}$'])
fig.set_legend_options(loc='upper right')
fig.legend(False)
#fig.save('FB_stability_paradoxical.pdf')
show(fig)
###Output
_____no_output_____
###Markdown
Non-paradoxical conditions
###Code
W_EEup = W_EE==5
W_IEup = W_IE==10
upstate_orig = [W_EEup,
W_EI==W_EI.subs(synapticFixedPoint_qss_subs_up_orig).subs(values_nonparadoxical).subs(W_EEup),
W_IEup,
W_II==W_II.subs(synapticFixedPoint_qss_subs_up_orig).subs(values_nonparadoxical).subs(W_IEup)]
upstate_norm = [w_EE==w_EE.subs(weights).subs(parameters).subs(parameters).subs(values_nonparadoxical).subs(upstate_orig),
w_EI==w_EI.subs(weights).subs(parameters).subs(parameters).subs(values_nonparadoxical).subs(upstate_orig),
w_IE==w_IE.subs(weights).subs(parameters).subs(parameters).subs(values_nonparadoxical).subs(upstate_orig),
w_II==w_II.subs(weights).subs(parameters).subs(parameters).subs(values_nonparadoxical).subs(upstate_orig)]
show(upstate_orig)
show(upstate_norm)
probe = [W_EE==5,W_IE==10]
positive_WEI_cond_border_nonpdx = solve(positive_WEI_cond.lhs()==positive_WEI_cond.rhs(),W_EE)[0].subs(values_nonparadoxical)
positive_WII_cond_border_nonpdx = solve(positive_WII_cond.lhs()==positive_WII_cond.rhs(),W_IE)[0].subs(values_nonparadoxical)
paradox_cond_border_nonpdx = solve(paradox_cond.left()==paradox_cond.right(),W_EE,W_IE)[0][0].subs(values_nonparadoxical)
neural_stable_detcond_v2_border_nonpdx = solve(neural_stable_detcond_v2.lhs()==0,W_IE)[0].subs(values_nonparadoxical)
neural_stable_trcond_v2_border_nonpdx = solve(neural_stable_trcond_v2.lhs()==0,W_IE)[0].subs(values_nonparadoxical)
synaptic_stable_cond1_border_nonpdx = solve(synaptic_stable_cond1.lhs()==synaptic_stable_cond1.rhs(),W_IE)[0].subs(values_nonparadoxical).subs(alphas)
synaptic_stable_cond2_border_nonpdx = solve(synaptic_stable_cond2.lhs()==synaptic_stable_cond2.rhs(),W_IE)[0].subs(values_nonparadoxical).subs(alphas)
if (I_ext-Theta_E).subs(values_nonparadoxical) > 0:
up_exist_cond_2_v2_border_nonpdx = solve(up_exist_cond_2_v2_pos.lhs()==up_exist_cond_2_v2_pos.rhs(),W_IE)[0].subs(values_nonparadoxical)
else:
up_exist_cond_2_v2_border_nonpdx = solve(up_exist_cond_2_v2_neg.lhs()==up_exist_cond_2_v2_neg.rhs(),W_IE)[0].subs(values_nonparadoxical)
print("NONPARADOXICAL CONDITIONS")
print("positive_WEI_cond:")
print(" ",positive_WEI_cond)
print(" border: ",positive_WEI_cond_border_nonpdx)
print(" probe: ",bool(positive_WEI_cond.subs(values_nonparadoxical).subs(probe)))
print("positive_WII_cond:")
print(" ",positive_WII_cond)
print(" border: ",positive_WII_cond_border_nonpdx)
print(" probe: ",bool(positive_WII_cond.subs(values_nonparadoxical).subs(probe)))
print("paradox_cond:")
print(" ",paradox_cond)
print(" border: ",paradox_cond_border_nonpdx)
print(" probe: ",bool(paradox_cond.subs(values_nonparadoxical).subs(probe)))
print("neural_stable_detcond_v2:")
print(" ",neural_stable_detcond_v2)
print(" border: ",neural_stable_detcond_v2_border_nonpdx)
print(" probe: ",bool(neural_stable_detcond_v2.subs(values_nonparadoxical).subs(probe)))
print("neural_stable_trcond_v2:")
print(" ",neural_stable_trcond_v2)
print(" border: ",neural_stable_trcond_v2_border_nonpdx)
print(" probe: ",bool(neural_stable_trcond_v2.subs(values_nonparadoxical).subs(probe)))
print("synaptic_stable_cond1:")
print(" ",synaptic_stable_cond1)
print(" border: ",synaptic_stable_cond1_border_pdx)
print(" probe: ",bool(synaptic_stable_cond1.subs(values_nonparadoxical).subs(alphas).subs(probe)))
print("synaptic_stable_cond2:")
print(" ",synaptic_stable_cond2)
print(" border: ",synaptic_stable_cond2_border_pdx)
print(" probe: ",bool(synaptic_stable_cond2.subs(values_nonparadoxical).subs(alphas).subs(probe)))
if (I_ext-Theta_E).subs(values_nonparadoxical) > 0:
print("up_exist_cond_2_v2_pos:")
print(" ",up_exist_cond_2_v2_pos)
print(" border: ",up_exist_cond_2_v2_border_pdx)
print(" probe: ",bool(up_exist_cond_2_v2_pos.subs(values_nonparadoxical).subs(probe)))
else:
print("up_exist_cond_2_v2_neg:")
print(" ",up_exist_cond_2_v2_neg)
print(" border: ",up_exist_cond_2_v2_neg_border_pdx)
print(" probe: ",bool(up_exist_cond_2_v2_neg.subs(values_nonparadoxical).subs(probe)))
W_EE_max = 10
fig1 = line([[W_EE.subs(positive_WEI_cond_border_nonpdx),0],[W_EE.subs(positive_WEI_cond_border_nonpdx),50]],color='blue',linestyle='-',legend_label='positive WEI')
fig2 = line([[0,W_IE.subs(positive_WII_cond_border_nonpdx)],[W_EE_max,W_IE.subs(positive_WII_cond_border_nonpdx)]],color='blue',linestyle='--',legend_label='positive WEI')
fig3 = line([[W_EE.subs(paradox_cond_border_nonpdx),0],[W_EE.subs(paradox_cond_border_nonpdx),50]],color='magenta',legend_label='paradoxical')
fig4 = plot(W_IE.subs(neural_stable_detcond_v2_border_nonpdx),(W_EE,0,W_EE_max),ymin=0,color='green',linestyle='-',legend_label='neural detcond')
fig5 = plot(W_IE.subs(neural_stable_trcond_v2_border_nonpdx),(W_EE,0,W_EE_max),ymin=0,color='green',linestyle='--',legend_label='neural trcond')
#fig6 = plot(W_IE.subs(up_exist_cond_2_v2_border_nonpdx),(W_EE,0,W_EE_max),ymin=0,color='black',linestyle='--',legend_label='up exists')
fig7 = plot(W_IE.subs(synaptic_stable_cond1_border_nonpdx),(W_EE,0,W_EE_max),ymin=0,color='black',linestyle='-',legend_label='synaptic stable 1')
fig8 = plot(W_IE.subs(synaptic_stable_cond2_border_nonpdx),(W_EE,0,W_EE_max),ymin=0,color='black',linestyle='--',legend_label='synaptic stable 2')
tt1 = text('(positive WEI)', (0.1+W_EE.subs(positive_WEI_cond_border_nonpdx),20),color='blue',horizontal_alignment='left')
tt2 = text('(positive WII)', (2.5,0.5+W_IE.subs(positive_WII_cond_border_nonpdx)),color='blue',horizontal_alignment='left')
tt3 = text('(paradoxical)', (0.1+W_EE.subs(paradox_cond_border_nonpdx),18.5),color='magenta',horizontal_alignment='left')
tt4 = text('(detcond\n stable)', (0.9,9),color='green',horizontal_alignment='left')
tt5 = text('(trcond\nstable)', (7,7.5),color='green',horizontal_alignment='left')
#tt6 = text('(up exists)', (1.2,7),color='black',horizontal_alignment='left')
tt7 = text('(synaptic\n stable 1)', (0.2,15),color='black',horizontal_alignment='left')
tt8 = text('(synaptic\n stable 2)', (0.6,12),color='black',horizontal_alignment='left')
#fig = fig1 + fig2 + fig3 + fig4 + fig5 + fig6 + fig7 + fig8 + tt1 + tt2 + tt3 + tt4 + tt5 + tt6 + tt7 + tt8
fig = fig1 + fig2 + fig3 + fig4 + fig5 + fig7 + fig8 + tt1 + tt2 + tt3 + tt4 + tt5 + tt7 + tt8
fig.xmin(0)
fig.xmax(W_EE_max)
fig.ymin(0)
fig.ymax(20)
fig.axes_labels(['$W_{EE}$', '$W_{IE}$'])
fig.set_legend_options(loc='upper right')
fig.legend(False)
#fig.save('BA_stability_nonparadoxical.pdf')
show(fig)
###Output
_____no_output_____ |
Projeto4/Atividades/Aula03/Aula 03.ipynb | ###Markdown
Lesson 03: Camera Calibration. In previous classes we saw that a camera can be represented by two matrices that model its intrinsic and extrinsic parameters. The problem is that in those exercises we assumed these parameters were known, which rarely happens in the real world. In this activity we will learn how to obtain an estimate of these parameters through a calibration process. Important remarks (read before continuing!): many cameras have an autofocus option. You must **disable your camera's autofocus before performing the calibration**; otherwise the parameters will keep changing all the time. Calibration with ChArUco boards. *ChArUco* is the union of the terms *chessboard* and *ArUco*, and it is essentially what the name says: a chessboard with ArUco markers. The ChArUco board is used because it is easily detected in an image, and the markers are used to improve corner detection. OpenCV already provides ready-made functions to calibrate a camera using a ChArUco board. Intuition. Unlike the other activities, we will not implement the calibration "by hand", but here is a brief intuition of the calibration process. As we saw before, the projection of a 3D point onto a camera image can be represented by a sequence of matrix multiplications:$$\begin{pmatrix}P'_x \\P'_y \\1\end{pmatrix}=\begin{pmatrix}f_x & 0 & p_x & 0 \\0 & f_y & p_y & 0 \\0 & 0 & 1 & 0\end{pmatrix}\begin{pmatrix}E_{11} & E_{12} & E_{13} & E_{14} \\E_{21} & E_{22} & E_{23} & E_{24} \\E_{31} & E_{32} & E_{33} & E_{34} \\0 & 0 & 0 & 1\end{pmatrix}\begin{pmatrix}P_x \\P_y \\P_z \\1\end{pmatrix}$$where $P'$ is the point in the image, in pixels, $f$ is the focal length, $p$ is the principal point, $E$ is the matrix of extrinsic parameters, and $P$ is the 3D point that will be projected onto the image. If we multiply the intrinsic and extrinsic parameter matrices, we get:$$\begin{pmatrix}P'_x \\P'_y \\1\end{pmatrix}=\begin{pmatrix}C_{11} & C_{12} & C_{13} & C_{14} \\C_{21} & C_{22} & C_{23} & C_{24} \\C_{31} & C_{32} & C_{33} & C_{34}\end{pmatrix}\begin{pmatrix}P_x \\P_y \\P_z \\1\end{pmatrix}$$where $C$ is the result of multiplying the two matrices. If we have enough pairs of points $P$ and $P'$ (points in space and their projections in the image), we can set up a system of linear equations, just as we did earlier when computing homographies. But how can we obtain these point pairs? That is where ChArUco boards come in! On a ChArUco board we know the location of every corner of every marker and of every square, so we already have the $P_x$ and $P_y$ positions of many points, but what about the $P_z$ coordinate? The whole board lies in a single plane, so we can use the same coordinate $P_z = 0$ for all of them. The extrinsic parameters are then responsible for moving the board from the $XY$ plane to its position relative to the camera, whose centre of projection is at the origin. Phew, we are done! If you did not fully follow the discussion so far, don't worry; practising with the code may help. As mentioned before, much of what we need is already implemented in OpenCV functions. Obtaining the reference points. The first step is to obtain the reference points. To do this, save a few images with the board at different positions and distances. 
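A brief aside on the linear-system step mentioned in the intuition above (a sketch of the standard direct linear transform formulation; it is not something this notebook computes explicitly): dividing out the homogeneous coordinate gives $$ P'_x = \frac{C_{11}P_x + C_{12}P_y + C_{13}P_z + C_{14}}{C_{31}P_x + C_{32}P_y + C_{33}P_z + C_{34}}, \qquad P'_y = \frac{C_{21}P_x + C_{22}P_y + C_{23}P_z + C_{24}}{C_{31}P_x + C_{32}P_y + C_{33}P_z + C_{34}}, $$ so each 3D-2D correspondence contributes two equations that are linear in the twelve unknowns $C_{ij}$, and with enough correspondences the system can be solved (up to scale) by least squares.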
Try to obtain images spreading the board over as many different positions and orientations as possible. For each image, check that OpenCV can detect at least 4 markers of the ChArUco board. Store the marker positions in one list and the IDs in another list. Once you have stored the marker positions for all the photos, run the OpenCV calibration function (`cv2.aruco.calibrateCameraCharuco`). Save the calibration result to a file so that you do not have to calibrate every time. Useful OpenCV functions. The following functions may be useful for this activity: - `cv2.aruco.getPredefinedDictionary` - `cv2.aruco.CharucoBoard_create` - `cv2.aruco.detectMarkers` - `cv2.aruco.interpolateCornersCharuco` - `cv2.aruco.calibrateCameraCharuco` - `cv2.aruco.estimatePoseCharucoBoard` - `cv2.aruco.drawAxis` In addition, the links at the end of the notebook can help a lot. Activity: 1. Use OpenCV to capture at least 10 images with the ChArUco board in different positions and orientations. 1. For each image, detect the ArUco markers 2. Find the corners of the ChArUco board, refining the detection (`cv2.aruco.interpolateCornersCharuco`) 3. Store the corners and the IDs in lists 2. Calibrate the camera with the points detected in all the images 3. Save the calibration result to a file 4. Write a program that loads your camera calibration, estimates the pose (position and orientation) of the ChArUco board, and draws the axes (`cv2.aruco.drawAxis`). Challenges: load the object from the second class's activity (`teapot.obj`) and display it over the board using the pose estimated by OpenCV. The function `cv2.projectPoints` may be useful for this challenge (a brief sketch is appended at the end of the code below).
###Code
import cv2
import numpy as np
import matplotlib.pyplot as plt
import cv2.aruco as aruco
import pickle
cap = cv2.VideoCapture(0)
i = 0
while(True):
ret, frame = cap.read() #640x480
# Show image, press esc to exit
cv2.imshow('clean image', frame)
k = cv2.waitKey(33)
if k == 27: #esc
break
elif k == 32:
cv2.imwrite("img"+str(i)+".jpeg",frame)
i += 1
else:
continue
# Clean everything up
cap.release()
cv2.destroyAllWindows()
# Item 1
# Show at least one image with the detected board here
# Detecting markers in real time
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
charuco_board = cv2.aruco.CharucoBoard_create(5, 7, 0.032, 0.024, aruco_dict)
markers_corners = []
markers_ids = []
img_size = 0
for i in range(10):
frame = cv2.imread("img"+str(i)+".jpeg")
gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img_size = gray_image.shape
markers, ids, rejectedImgPoints = aruco.detectMarkers(gray_image, aruco_dict, parameters=parameters)
if (len(markers) <= 0):
print("Nenhum marcador encontrado.")
else:
_, corners, m_ids = cv2.aruco.interpolateCornersCharuco(markers, ids, gray_image, charuco_board)
markers_corners.append(corners)
markers_ids.append(m_ids)
img = frame.copy()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.aruco.drawDetectedMarkers(img, markers)
plt.imshow(img)
# Items 2 and 3
# Print the calibration results here
# Calibration
c = cv2.aruco.calibrateCameraCharuco(markers_corners, markers_ids, charuco_board, img_size, None, None, flags=cv2.CALIB_RATIONAL_MODEL)
print(c)
# Save the calibration result with pickle
with open('calibratecharuco.p', 'wb') as fp:
pickle.dump(c, fp, protocol=pickle.HIGHEST_PROTOCOL)
# Item 4
# Estimate the board pose and draw the (XYZ) axes
calibration = pickle.load(open('calibratecharuco.p', 'rb'))
# Detecting markers in real time
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
charuco_board = cv2.aruco.CharucoBoard_create(5, 7, 0.032, 0.024, aruco_dict)
cap = cv2.VideoCapture(0)
while(True):
ret, frame = cap.read() #640x480
gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img_size = gray_image.shape
markers, ids, rejectedImgPoints = aruco.detectMarkers(gray_image, aruco_dict, parameters=parameters)
# if (len(markers) <= 0):
# continue
# # print("Nenhum marcador encontrado.")
# else:
if (len(markers) > 0):
found_markers = cv2.aruco.drawDetectedMarkers(frame, markers)
_, corners, m_ids = cv2.aruco.interpolateCornersCharuco(markers, ids, gray_image, charuco_board)
_, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(corners, m_ids, charuco_board, calibration[1], calibration[2])
try:
frame = aruco.drawAxis(frame, calibration[1], calibration[2], rvec, tvec, 0.1)
except:
pass
# Show image, press esc to exit
cv2.imshow('clean image', frame)
k = cv2.waitKey(33)
if k == 27: #esc
break
else:
continue
# Clean everything up
cap.release()
cv2.destroyAllWindows()
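# ---------------------------------------------------------------------------
# Challenge sketch: project an OBJ model onto the board using the estimated pose.
# A minimal, hedged example of how cv2.projectPoints could be used with the
# rvec/tvec estimated above.  'teapot.obj' comes from a previous class and is
# assumed to be available here; the scale factor is an arbitrary assumption to
# bring the model into board units (metres).
def load_obj_vertices(path):
    """Minimal OBJ reader: keeps only the 'v x y z' vertex lines."""
    vertices = []
    with open(path) as obj_file:
        for line in obj_file:
            if line.startswith('v '):
                vertices.append([float(c) for c in line.split()[1:4]])
    return np.array(vertices, dtype=np.float32)

def draw_model(image, obj_path, rvec, tvec, camera_matrix, dist_coeffs, scale=0.01):
    """Project the model vertices with cv2.projectPoints and draw them as dots."""
    points_3d = load_obj_vertices(obj_path) * scale
    points_2d, _ = cv2.projectPoints(points_3d, rvec, tvec, camera_matrix, dist_coeffs)
    for p in points_2d.reshape(-1, 2):
        cv2.circle(image, (int(p[0]), int(p[1])), 1, (0, 255, 0), -1)
    return image

# Example usage (only meaningful right after a successful pose estimate above):
# frame = draw_model(frame, 'teapot.obj', rvec, tvec, calibration[1], calibration[2])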
###Output
_____no_output_____ |
solutions/hw2/FINM36700_HW2_Group_A26-2.ipynb | ###Markdown
Section 2: Analyzing the Data
###Code
df_hedge_fund = pd.read_excel('proshares_analysis_data.xlsx',sheet_name='hedge_fund_series').set_index('date')
df_hedge_fund_annual = df_hedge_fund * 12
df_hedge_fund_annual.head()
###Output
_____no_output_____
###Markdown
__Problem 1__ : For the series in the “hedgefundseries” tab, report the following summary statistics: (a) mean (b) volatility (c) Sharpe Ratio
###Code
# mean
mu_annual = df_hedge_fund_annual.mean()
# volatility
vol_annual = df_hedge_fund_annual.std()/np.sqrt(12)
# Sharpe Ratio (w/ total returns)
sharpe_annual = mu_annual/vol_annual
table1 = pd.DataFrame({'Mean':mu_annual, 'Volatility':vol_annual, 'Sharpe ratio':sharpe_annual})
table1
###Output
_____no_output_____
###Markdown
__Problem 2__ : For the series in the “hedgefundseries” tab, calculate the following statistics related to tail risk. (a) Skewness (b) Excess Kurtosis (in excess of 3) (c) VaR (.05) - the fifth quantile of historic returns (d) CVaR (.05) - the mean of the returns at or below the fifth quantile (e) Maximum drawdown - include the dates of the max/min/recovery within the max drawdown period.
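For reference, a brief sketch of the tail-risk definitions used in the code below: $$ \text{VaR}_{0.05} = q_{0.05}(r), \qquad \text{CVaR}_{0.05} = \mathbb{E}\left[\, r \mid r \le \text{VaR}_{0.05} \right], \qquad \text{DD}_t = \frac{V_t}{\max_{s \le t} V_s} - 1, $$ where $q_{0.05}$ is the fifth percentile, $V_t = \prod_{s \le t}(1 + r_s)$ is the cumulative-return index, and the maximum drawdown is $\min_t \text{DD}_t$.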
###Code
mu = df_hedge_fund.mean()
sigma = df_hedge_fund.std()
# skewness
skew = df_hedge_fund.skew()
# Excess Kurtosis
kurtosis = df_hedge_fund.kurtosis()
# VaR
VaR = df_hedge_fund.quantile(0.05)
# CVaR
CVaR = df_hedge_fund[df_hedge_fund<=VaR].mean()
table2 = pd.DataFrame({'Skewness':skew, 'Kurtosis':kurtosis, 'VaR':VaR, 'CVaR':CVaR})
table2
cum_returns = (1 + df_hedge_fund).cumprod()
rolling_max = cum_returns.cummax()
drawdown = (cum_returns - rolling_max) / rolling_max
max_drawdown = drawdown.idxmin()
drawdowns = pd.DataFrame({'Max Drawdown': drawdown.loc[max_drawdown['HFRIFWI Index']],
'Peak': cum_returns[:drawdown.idxmin()[0]].idxmax(),
'Bottom': drawdown.idxmin()})
drawdowns = drawdowns.assign(
RecoverDate = lambda x: drawdown.loc[x.Bottom[1]:].idxmax(),
TimeToRecover = lambda x: x.RecoverDate-x.Peak
)
drawdowns
###Output
_____no_output_____
###Markdown
__Problem 3__ For the series in the “hedgefundseries” tab, run a regression of each against SPY (found in the “merrillfactors” tab). Include an intercept. Report the following regression-based statistics: (a) Market Beta (b) Treynor Ratio (c) Information Ratio
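For reference, a sketch of the definitions used in the code below (computed on annualized total returns, i.e. without subtracting the risk-free rate): $$ \beta_i = \frac{\operatorname{Cov}(r_i, r_{SPY})}{\operatorname{Var}(r_{SPY})}, \qquad \text{Treynor}_i = \frac{\bar{r}_i}{\beta_i}, \qquad \text{IR}_i = \frac{\hat{\alpha}_i}{\sigma(\hat{\epsilon}_i)}. $$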
###Code
df_merrill = pd.read_excel('proshares_analysis_data.xlsx',sheet_name='merrill_factors').set_index('date')
df_merrill_annual = df_merrill * 12
df_merrill_annual.head()
alpha =[]
beta = []
epsilon = []
for i in range(len(df_hedge_fund_annual.columns)):
y = df_hedge_fund_annual[df_hedge_fund_annual.columns[i]]
X = sm.add_constant(df_merrill_annual['SPY US Equity'])
model1 = sm.OLS(y,X).fit()
alpha.append(model1.params['const'])
beta.append(model1.params['SPY US Equity'])
epsilon.append(model1.resid)
epsilon = np.array(epsilon)
Treynor_ratio = mu_annual / beta
Information_ratio = np.array(alpha)/(epsilon.std(axis=1)/np.sqrt(12))  # use each series' own residual (tracking-error) volatility
df_regression = pd.DataFrame({'Beta': beta, 'Treynor Ratio': Treynor_ratio, 'Information Ratio': Information_ratio})
df_regression
###Output
_____no_output_____
###Markdown
__Problem 4__ Relative Performance. Discuss the previous statistics, and what they tell us about...
###Code
df_SPY = pd.DataFrame({'Mean':[df_merrill_annual['SPY US Equity'].mean()],
'Volatility':[df_merrill_annual['SPY US Equity'].std()/np.sqrt(12)],
'Sharpe Ratio': [df_merrill_annual['SPY US Equity'].mean()/(df_merrill_annual['SPY US Equity'].std()/np.sqrt(12))]},index=['SPY']).T
df_SPY
###Output
_____no_output_____
###Markdown
(a) the differences between SPY and the hedge-fund series? __Solution:__ SPY has a higher mean total return than the equities and indices in the hedge-fund series and, although it also has a higher volatility, it still ends up with a higher Sharpe ratio. (b) which performs better between HDG and QAI. __Solution:__ HDG (0.0281) has a slightly higher mean return than QAI (0.025); however, HDG also has a much higher volatility. This translates to QAI having a higher Sharpe ratio than HDG (0.560 QAI vs 0.498 HDG). We conclude that QAI performs better than HDG. (c) whether HDG and the ML series capture the most notable properties of HFRI. __Solution:__ HDG and the ML series do not do a good job of capturing the key components of HFRI. The mean and Sharpe ratio of HFRI are much higher than those of HDG and the ML series. This suggests that HDG US Equity and the ML series do not encompass the key components of HFRI. Furthermore, if we look at Problem 5, we see that while the correlations are quite high, the returns are drastically different. This suggests that we are missing key aspects of HFRI in the HDG and ML series. __Problem 5__ Report the correlation matrix for these assets. (a) Show the correlations as a heat map.
###Code
corrmat = df_hedge_fund_annual.corr()
# ignore self-correlation
corrmat[corrmat==1] = None
sns.heatmap(corrmat)
corrmat
###Output
_____no_output_____
###Markdown
(b) Which series have the highest and lowest correlations? The 2 ML series, MLEIFCTR Index and MLEIFCTX Index, have the highest correlation of 0.999939. QAI and HDG have the lowest correlation of 0.847597. __Problem 6__Replicate HFRI with the six factors listed on the “merrillfactors” tab. Include a constant, andrun the unrestricted regression:(a) Report the intercept and betas.(b) Are the betas realistic position sizes, or do they require huge long-short positions?(c) Report the R-squared.(d) Report the volatility of epsilon^merr, (the tracking error.)
###Code
X = sm.add_constant(df_merrill_annual)
y = df_hedge_fund_annual['HFRIFWI Index']
model_reg = sm.OLS(y,X).fit()
alpha = model_reg.params.const
beta_w_int = model_reg.params[df_merrill_annual.columns]
r_squared = model_reg.rsquared
vol_of_epsilon = np.array(model_reg.resid.std())/np.sqrt(12)
print('Intercept: '+str(alpha)+ ', \nbeta: '+str(beta_w_int)+', \nR-squared: '+str(r_squared)+' \nVolatility of error: '+ str(vol_of_epsilon))
###Output
Intercept: 0.01376038405810625,
beta: SPY US Equity 0.072022
USGG3M Index -0.400591
EEM US Equity 0.072159
EFA US Equity 0.106318
EUO US Equity 0.022431
IWM US Equity 0.130892
dtype: float64,
R-squared: 0.8556947723602492
Volatility of error: 0.023365015548091672
###Markdown
The betas are not realistic position sizes. The regression suggests that roughly 40% of the portfolio should be short the USGG3M Index. This is a very large short position, and no reasonable fund would agree to this allocation.
###Code
import warnings
date_range = df_merrill_annual.iloc[61:].index
oos_fitted = pd.Series(index=date_range, name='OOS_fit',dtype='float64')
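# Expanding-window out-of-sample (OOS) fit: for each month in date_range (starting at
# observation 61) the replication regression is re-estimated on the factor data up to and
# including the previous month only; the fitted intercept and betas are then applied to the
# factors observed in the current month to produce that month's OOS fitted value.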
for date in date_range:
date_month_prior = pd.DatetimeIndex([date]).shift(periods=-1, freq='M')[0]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = sm.add_constant(df_merrill_annual[:date_month_prior])
y = df_hedge_fund_annual['HFRIFWI Index'][:date_month_prior]
model3 = sm.OLS(y, X, missing='drop').fit()
alpha = model3.params['const']
beta = model3.params.drop(index='const')
X_t = df_merrill_annual.loc[date]
predicted_next_value = alpha + X_t @ beta
oos_fitted[date] = predicted_next_value
df_OOS = pd.DataFrame(df_hedge_fund_annual['HFRIFWI Index'].iloc[61:])
df_OOS['Regression result'] = oos_fitted
df_OOS['Difference From Target'] = df_OOS['HFRIFWI Index'] - df_OOS['Regression result']
mean_diff = df_OOS['Difference From Target'].mean()
print('The mean difference from the Out of Sample replication vs the target replication is :' + str(mean_diff))
print('This is an average of', round(mean_diff/mu_annual['HFRIFWI Index']*100, 4),'% error')
df_OOS
# The mean error between the target replication and the out-of-sample replication is about 0.0137, i.e. roughly 27% relative error.
###Output
The mean difference from the Out of Sample replication vs the target replication is :0.013734933302694178
This is an average of 27.0457 % error
###Markdown
__Problem 8__ We estimated the replications using an intercept. Try the full-sample estimation, but this time without an intercept. Report (a) the regression beta. How does it compare to the estimated beta with an intercept, $\hat{\beta}^{merr}$? (b) the mean of the fitted value, $\check{r}^{hfri}_t$. How does it compare to the mean of the HFRI? (c) the correlation of the fitted values, $\check{r}^{hfri}_t$, to the HFRI. How does it compare to that of the fitted values with an intercept, $\hat{r}^{hfri}_t$?
###Code
X = df_merrill_annual
y = df_hedge_fund_annual['HFRIFWI Index']
model_reg_no_alpha = sm.OLS(y,X).fit()
beta_wo_int = model_reg_no_alpha.params[df_merrill_annual.columns]
betas = pd.DataFrame({'Alpha': model_reg.params,'No Alpha': model_reg_no_alpha.params})
betas.assign(Diff = lambda x: x.Alpha - x['No Alpha'])
predicted_returns_wo_intercept = X @ beta_wo_int
print('Mean of the predicted returns without interception: '+str(predicted_returns_wo_intercept.mean())
+", Actual mean annual return: "+ str(mu_annual['HFRIFWI Index']))
corr_wo_intercept = predicted_returns_wo_intercept.corr(y)
predicted_returns_w_intercept = X @ beta_w_int
corr_w_intercept = predicted_returns_w_intercept.corr(y)
print('Correlation of the predicted returns without interception with HFRI: '+str(corr_wo_intercept)
+", Correlation of the predicted returns with interception with HFRI: "+ str(corr_w_intercept))
###Output
Mean of the predicted returns without interception: 0.04288821007122869, Actual mean annual return: 0.050784216013560556
Correlation of the predicted returns without interception with HFRI: 0.924516841871503, Correlation of the predicted returns with interception with HFRI: 0.9250377140204874
|
Code/Lotka_Volterra_with_plastic_particles.ipynb | ###Markdown
Sample discretized diffusion
###Code
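# Note: generate_traj is defined earlier in the notebook (not shown in this excerpt); it
# simulates the Euler-discretized diffusion with step gamma starting from x0 and returns
# the d-dimensional trajectory together with the driving noise.  The wrapper below takes
# the first coordinate of the state as the target function f(X) (the commented line shows
# the alternative of summing all coordinates).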
def generate_observations(x0,n,gamma,r_seed,d):
X_train, noise_train = generate_traj(x0,n,gamma,r_seed,d)
#set target function
#Y_train = np.sum(X_train,axis=1)
Y_train = X_train[:,0]
X_train = X_train.reshape((1,-1,d))
Y_train = Y_train.reshape((1,-1,1))
return X_train, noise_train, Y_train
x0 = np.array([10.0,5.0,0.0,0.0])
r_seed = 1812
X_train, noise_train, Y_train = generate_observations(x0,n,gamma,r_seed,d)
print(X_train[0,:1000,:])
print(Y_train.shape)
print(X_train.shape)
###Output
(1, 2000, 1)
(1, 2000, 4)
###Markdown
Bernoulli:: Optimize coefficients by solving regression with polynomial features
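A note on what the fitted coefficients represent (inferred from how `coefs_poly` is used in the next cells; `approx_q` itself is defined earlier in the notebook and is not shown in this excerpt): for each lag $k$ up to `lag`, the conditional expectation of the target $k$ steps ahead is approximated by least squares on polynomial features $\phi_j$ of the current state of degree at most `S_max`, $$ \widehat{\mathbb{E}}\left[\, f(X_{t+k}) \mid X_t = x \,\right] \approx \sum_j c_{k,j}\,\phi_j(x), $$ so that row $k$ of `coefs_poly` holds the coefficients $c_{k,j}$; these regressors are the building blocks of the control variates compared further down.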
###Code
#polynomial coefficients
coefs_poly = approx_q(X_train,Y_train,n_traj,lag,S_max)
#print(coefs_poly.shape)
print(coefs_poly)
regr_vals = np.zeros((lag,X_train.shape[1]),dtype=float)
poly = PolynomialFeatures(S_max)
features = poly.fit_transform(X_train[0])
#features = np.zeros((X_train.shape[1],6),dtype=float)
#features[:,0] = 1.0
#features[:,1:3] = X_train[0,:,:]
#features[:,3] = X_train[0,:,0]**2
#features[:,4] = X_train[0,:,0]*X_train[0,:,1]
#features[:,5] = X_train[0,:,1]**2
for i in range(len(regr_vals)):
regr_vals[i,:] = np.sum(coefs_poly[i,:]*features,axis=1)
plt.figure(figsize=(10, 10))
#plt.title("Testing regression model",fontsize=20)
plt.plot(X_train[0,:2000,0],color='r',label='first population')
plt.plot(X_train[0,:2000,1],color='g',label='second population')
#plt.plot(X_train[0,:2000,2],color='m',label='control')
plt.legend(loc = 'upper left',fontsize = 16)
#plt.show()
plt.savefig('./4d_lotka_volterra_plastic')
###Output
_____no_output_____
###Markdown
Test our regressors
###Code
cur_lag = 99
N_pts = 1000
plt.figure(figsize=(10, 10))
plt.title("Testing regression model",fontsize=20)
plt.plot(Y_train[0,cur_lag:N_pts+cur_lag,0],color='r',label='true function')
plt.plot(regr_vals[cur_lag,:N_pts],color='g',label = 'practical approximation')
plt.legend(loc = 'upper left',fontsize = 16)
plt.show()
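# test_traj is defined earlier in the notebook (not shown in this excerpt).  The cell below
# runs it in parallel over n_traj_test independent seeds; for each simulated trajectory it
# returns the plain (vanilla) Euler-scheme estimate together with the variance-reduced
# MDCV-corrected estimates, which are what the boxplots further down compare.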
n_traj_test = 40
test_seed = 1453
nbcores = multiprocessing.cpu_count()
trav = Pool(nbcores)
res = trav.starmap(test_traj, [(coefs_poly,gamma,test_seed+i,lag,d,N_test,x0) for i in range (n_traj_test)])
#res = trav.starmap(test_traj, [(Cur_pot,coefs_poly,step,test_seed+i,lag,K_max,S_max,N_burn,N_test,d,f_type,inds_arr,params,x0,fixed_start) for i in range (n_traj_test)])
trav.close()
res = np.asarray(res)
print(res.shape)
###Output
(2000, 4)
###Markdown
Comparison plots
###Code
title = ""
#labels = ['Vanilla\n Euler scheme', 'Euler scheme \nwith MDCV-1']
labels = ['Vanilla\n Euler scheme', 'Euler scheme \nwith MDCV-1','Euler scheme \nwith MDCV-2']
data = [res[:,0], res[:,1], res[:,2]]
boxplot_ind(data, title, labels,path="./lotka_vorterra_01_12_plastic_particles.pdf")
def set_axis_style_boxplot(ax, labels, parts):
colors = (sns.color_palette("muted")[0:7])
ax.grid(color='black', linestyle='-', linewidth=0.15, alpha=0.6)
ax.set_xticks(np.arange(1, len(labels)+1))
ax.set_xticklabels(labels, fontsize=12)
ax.set_xlim(0.5, len(labels) + 0.5)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
for pc,i in zip(parts['boxes'],range(len(labels))):
pc.set(facecolor=colors[i],alpha=0.65)
pc.set_edgecolor('black')
pc.set_linewidth(0.65)
def boxplot_ind(data, title, labels, path):
meanprops = dict(linestyle='-', linewidth=1, color='black')
medianprops = dict(linestyle='', linewidth=0)
fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(8, 4), sharey=True, frameon=False,dpi=100)
fig.suptitle(title, fontsize=20)
parts = ax1.boxplot(data, widths=0.6, patch_artist=True, meanline=True, showmeans=True, medianprops=medianprops,meanprops = meanprops, showfliers=False)
set_axis_style_boxplot(ax1, labels, parts)
fig.tight_layout()
fig.subplots_adjust(top=0.85)
plt.savefig(path)
plt.show()
title = ""
#labels = ['Vanilla\n Euler scheme', 'Euler scheme \nwith MDCV-1']
labels = ['Vanilla \n Euler scheme','MDCV-2, \n $N = 10^3$', 'MDCV-2, \n $N = 2 \\times 10^3$',\
'MDCV-2, \n $N = 5 \\times 10^3$', 'MDCV-2, \n $N = 10^4$', 'MDCV-2, \n $N = 5 \\times 10^4$']
#labels = ['lag \n = 10','lag \n = 20', 'lag \n = 30', 'lag \n = 40', 'lag \n = 50', 'lag \n = 60']
#labels = ['lag = 30', 'lag = 40', 'lag = 50', 'lag = 60']
data = [res_new_0[:,0],res_new_0[:,2],res_new[:,2],res_new_1[:,2], res_new_2[:,2],res_new_3[:,2]]
#data = [res_new_1[:,2], res_new_2[:,2],res_new_3[:,2],res_new_4[:,2]]
boxplot_ind(data, title, labels,path="./2d_nonsymmetric_potential_quadratic_regression_comparison.pdf")
data = [res_new_0[:,0],res_new_0[:,2],res_new[:,2],res_new_1[:,2], res_new_2[:,2],res_new_3[:,2],res_new_4[:,2]]
#data = [res_new_1[:,2], res_new_2[:,2],res_new_3[:,2],res_new_4[:,2]]
boxplot_ind(data, title, labels,path="./2d_nonsymmetric_potential_lags_comparison.pdf")
###Output
_____no_output_____ |
examples/dsdemo1.ipynb | ###Markdown
NTF synthesis - demo 1=======================Demonstration of the **`synthesizeNTF`** function, as done in the **MATLAB Delta Sigma Toolbox**, employing its Python port **`deltasigma`**. * The **Noise Transfer Function** (NTF) is synthesized for a **5th-order**, **low-pass** modulator. * The first section deals with an **NTF without optimized zeros** (`opt=0`), * while the second section with an **NTF *with optimized* zeros** (`opt=1`). * Finally the two transfer functions are compared. * Then we move on to the synthesis of an **8th-order band-pass modulator** with optimized zeros. 5th-order modulator-------------------General parameters:
###Code
order = 5
OSR = 32
###Output
_____no_output_____
###Markdown
5th-order modulator: NTF without zeros optimizationThe synthesis of an NTF can be performed with the `synthesizeNTF(order, OSR, opt)`.We intentionally disable the zeros optimization, setting `opt=0`.
###Code
# Synthesize!
H0 = synthesizeNTF(order, OSR, opt=0)
# 1. Plot the singularities.
subplot(121)
plotPZ(H0, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH0)
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
semilogx(f*2*OSR, magH0)
axis([fstart, 1.2, -100,- 30])
grid(True)
sigma_H0 = dbv(rmsGain(H0, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H0*np.array([1, 1]))
semilogx([fstart, 1], sigma_H0*np.array([1, 1]),'-o')
text(0.15, sigma_H0 + 5, 'rms gain = %5.0fdB' % sigma_H0)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
5th-order modulator: NTF *with* zeros optimizationThis time we enable the zeros optimization, setting `opt=1` when calling synthesizeNTF(), then replot the NTF as above.
###Code
# Synthesize again!
H0 = None
H1 = synthesizeNTF(order, OSR, opt=1)
# 1. Plot the singularities.
subplot(121)
plotPZ(H1, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH1 = dbv(evalTF(H1, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH1)
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH1 = dbv(evalTF(H1, z))
semilogx(f*2*OSR, magH1)
axis([fstart, 1.2, -100,- 30])
grid(True)
sigma_H1 = dbv(rmsGain(H1, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H1*np.array([1, 1]))
semilogx([fstart, 1], sigma_H1*np.array([1, 1]),'-o')
text(0.15, sigma_H1 + 5, 'RMS gain = %5.0fdB' % sigma_H1)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
5th-order modulator: comparison-------------------------------Overlayed plots follow to ease comparison of the two synthetization approaches.
###Code
# Synthesize!
H0 = synthesizeNTF(order, OSR, opt=0)
H1 = synthesizeNTF(order, OSR, opt=1)
# 1. Plot the singularities.
subplot(121)
# we plot the singularities of the optimized NTF in light
# green with slightly bigger markers so that we can better
# distinguish the two NTF's when overlayed.
plotPZ(H1, markersize=7, color='#90EE90')
plotPZ(H0, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
magH1 = dbv(evalTF(H1, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH0, label='All zeros in z=1')
plot(f, magH1, label='Optimized zeros')
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
legend(loc=4)
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
magH1 = dbv(evalTF(H1, z))
semilogx(f*2*OSR, magH0, label='All zeros in z=1')
semilogx(f*2*OSR, magH1, label='Optimized zeros')
axis([fstart, 1.2, -100,- 30])
grid(True)
sigma_H0 = dbv(rmsGain(H0, 0, 0.5/OSR))
sigma_H1 = dbv(rmsGain(H1, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H0*np.array([1, 1]))
plot([fstart, 1], sigma_H0*np.array([1, 1]), 'o-')
text(0.15, sigma_H0 + 5, 'RMS gain = %5.0fdB' % sigma_H0)
#semilogx([fstart, 1], sigma_H1*np.array([1, 1]))
plot([fstart, 1], sigma_H1*np.array([1, 1]), 'o-')
text(0.15, sigma_H1 + 5, 'RMS gain = %5.0fdB' % sigma_H1)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
legend(loc=4)
tight_layout()
###Output
_____no_output_____
###Markdown
8th-order bandpass Modulator----------------------------In the following, we synthesize an 8th-order modulator with optimized zeros.
###Code
order = 8
OSR = 64
opt = 2
f0 = 0.125
H = synthesizeNTF(order, OSR, opt, 1.5, f0)
subplot(121)
plotPZ(H)
title('Bandpass NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, f0 - 1./(2.*OSR), 50),
np.linspace(f0 - 1./ (2 * OSR), f0 + 1./(2.*OSR), 100),
np.linspace(f0 + 1./(2.*OSR), 0.5, 50)))
z = np.exp(2j * pi * f)
magH = dbv(evalTF(H, z))
subplot(222)
plot(f, magH)
G = (np.zeros((order//2,)), H[1], 1)
k = 1./np.abs(evalTF(G, np.exp(2j*np.pi*f0)))
G = (G[0], G[1], k)
magG = dbv(evalTF(G, z))
plot(f, magG, 'r')
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
#axis([0, 0.5, -100, 10])
grid(True)
xlabel('Normalized frequency ($1 \\rightarrow fs$)')
ylabel('dB')
title('Bandpass NTF/STF Magnitude Response')
f = np.linspace(f0 - 0.3/OSR, f0 + 0.3/OSR)
z = np.exp(2j*np.pi*f)
magH = dbv(evalTF(H, z))
subplot(224)
fstart = -.5
plot(2*OSR*(f - f0), magH)
axis([- 0.6, 0.6, -100, -60])
grid(True)
sigma_H = dbv(rmsGain(H, f0 - 0.25/OSR, f0 + 0.25/OSR))
#plot([-0.5, 0.5], sigma_H*np.array([1, 1]))
plot([-0.5, 0.5], sigma_H*np.array([1, 1]), 'o-')
text(-.2, sigma_H + 5, 'rms gain = %5.0fdB' % sigma_H)
xlabel('Normalized frequency offset')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
Further information about NTF synthesis---------------------------------------Please refer to `help(synthesizeNTF)` for detailed - and possibly more updated - documentation! `help(synthesizeNTF)` as of writing: Help on function synthesizeNTF in module deltasigma._synthesizeNTF:**synthesizeNTF(order=3, osr=64, opt=0, H_inf=1.5, f0=0.0)**Synthesize a noise transfer function for a delta-sigma modulator.**Parameters:**order : *int, optional* the order of the modulator, defaults to 3osr : *float, optional* the oversamping ratio, defaults to 64opt : *int or list of floats, optional* flag for optimized zeros, defaults to 0* 0 -> not optimized,* 1 -> optimized,* 2 -> optimized with at least one zero at band-center,* 3 -> optimized zeros (with optimizer)* 4 -> same as 3, but with at least one zero at band-center* [z] -> zero locations in complex formH_inf : *real, optional* max allowed peak value of the NTF. Defaults to 1.5f0 : *real, optional* center frequency for BP modulators, or 0 for LP modulators. Defaults to 0. 1 corresponds to the sampling frequency, so that 0.5 is the maximum value. A value of 0 specifies an LP modulator.**Returns:**ntf : *tuple* noise transfer function in zpk form.**Raises:**ValueError* 'Error. f0 must be less than 0.5' if f0 is out of range* 'Order must be even for a bandpass modulator.' if the order is incompatible with the modulator type.* 'The opt vector must be of length xxx' if opt is used to explicitly pass the NTF zeros and these are in the wrong number.**Warns:*** 'Creating a lowpass ntf.' if the center frequency is different from zero, but so low that a low pass modulator must be designed.* 'Unable to achieve specified H_inf ...' if the desired H_inf cannot be achieved.* 'Iteration limit exceeded' if the routine converges too slowly.**Notes:**This is actually a wrapper function which calls the appropriate versionof synthesizeNTF, based on the module control flag `optimize_NTF` whichdetermines whether to use optimization tools.Parameter ``H_inf`` is used to enforce the Lee stability criterion.**See also:*** `clans()` : Closed-Loop Analysis of Noise-Shaper. An alternative method for selecting NTFs based on the 1-norm of the impulse response of the NTF * `synthesizeChebyshevNTF()` : Select a type-2 highpass Chebyshev NTF.This function does a better job than synthesizeNTF if osr or H_inf is low. System version information
###Code
%version_information numpy, scipy, matplotlib, deltasigma
###Output
_____no_output_____
###Markdown
NTF synthesis - demo 1=======================Demonstration of the **`synthesizeNTF`** function, as done in the **MATLAB Delta Sigma Toolbox**, employing its Python port **`deltasigma`**. * The **Noise Transfer Function** (NTF) is synthesized for a **5th-order**, **low-pass** modulator. * The first section deals with an **NTF without optimized zeros** (`opt=0`), * while the second section with an **NTF *with optimized* zeros** (`opt=1`). * Finally the two transfer functions are compared. * Then we move on to the synthesis of an **8th-order band-pass modulator** with optimized zeros. 5th-order modulator-------------------General parameters:
###Code
order = 5
OSR = 32
###Output
_____no_output_____
###Markdown
5th-order modulator: NTF without zeros optimizationThe synthesis of an NTF can be performed with the `synthesizeNTF(order, OSR, opt)` function.We intentionally disable the zeros optimization, setting `opt=0`.
###Code
# Synthesize!
H0 = synthesizeNTF(order, OSR, opt=0)
# 1. Plot the singularities.
subplot(121)
plotPZ(H0, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH0)
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
semilogx(f*2*OSR, magH0)
axis([fstart, 1.2, -100, -30])
grid(True)
sigma_H0 = dbv(rmsGain(H0, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H0*np.array([1, 1]))
semilogx([fstart, 1], sigma_H0*np.array([1, 1]),'-o')
text(0.15, sigma_H0 + 5, 'rms gain = %5.0fdB' % sigma_H0)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
5th-order modulator: NTF *with* zeros optimizationThis time we enable the zeros optimization, setting `opt=1` when calling synthesizeNTF(), then replot the NTF as above.
###Code
# Synthesize again!
H0 = None
H1 = synthesizeNTF(order, OSR, opt=1)
# 1. Plot the singularities.
subplot(121)
plotPZ(H1, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH1 = dbv(evalTF(H1, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH1)
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH1 = dbv(evalTF(H1, z))
semilogx(f*2*OSR, magH1)
axis([fstart, 1.2, -100, -30])
grid(True)
sigma_H1 = dbv(rmsGain(H1, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H1*np.array([1, 1]))
semilogx([fstart, 1], sigma_H1*np.array([1, 1]),'-o')
text(0.15, sigma_H1 + 5, 'RMS gain = %5.0fdB' % sigma_H1)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
5th-order modulator: comparison-------------------------------Overlaid plots follow to ease comparison of the two synthesis approaches.
###Code
# Synthesize!
H0 = synthesizeNTF(order, OSR, opt=0)
H1 = synthesizeNTF(order, OSR, opt=1)
# 1. Plot the singularities.
subplot(121)
# we plot the singularities of the optimized NTF in light
# green with slightly bigger markers so that we can better
# distinguish the two NTF's when overlayed.
plotPZ(H1, markersize=7, color='#90EE90')
hold(True)
plotPZ(H0, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
magH1 = dbv(evalTF(H1, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH0, label='All zeros in z=1')
hold(True)
plot(f, magH1, label='Optimized zeros')
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
legend(loc=4)
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
magH1 = dbv(evalTF(H1, z))
semilogx(f*2*OSR, magH0, label='All zeros in z=1')
hold(True)
semilogx(f*2*OSR, magH1, label='Optimized zeros')
axis([fstart, 1.2, -100, -30])
grid(True)
sigma_H0 = dbv(rmsGain(H0, 0, 0.5/OSR))
sigma_H1 = dbv(rmsGain(H1, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H0*np.array([1, 1]))
plot([fstart, 1], sigma_H0*np.array([1, 1]), 'o-')
text(0.15, sigma_H0 + 5, 'RMS gain = %5.0fdB' % sigma_H0)
#semilogx([fstart, 1], sigma_H1*np.array([1, 1]))
plot([fstart, 1], sigma_H1*np.array([1, 1]), 'o-')
text(0.15, sigma_H1 + 5, 'RMS gain = %5.0fdB' % sigma_H1)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
legend(loc=4)
tight_layout()
###Output
_____no_output_____
###Markdown
8th-order bandpass Modulator----------------------------In the following, we synthesize an 8th-order modulator with optimized zeros.
###Code
order = 8
OSR = 64
opt = 2
f0 = 0.125
H = synthesizeNTF(order, OSR, opt, 1.5, f0)
subplot(121)
plotPZ(H)
title('Bandpass NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, f0 - 1./(2.*OSR), 50),
np.linspace(f0 - 1./ (2 * OSR), f0 + 1./(2.*OSR), 100),
np.linspace(f0 + 1./(2.*OSR), 0.5, 50)))
z = np.exp(2j * np.pi * f)
magH = dbv(evalTF(H, z))
subplot(222)
plot(f, magH)
hold(True)
G = (np.zeros((order//2,)), H[1], 1)
k = 1./np.abs(evalTF(G, np.exp(2j*np.pi*f0)))
G = (G[0], G[1], k)
magG = dbv(evalTF(G, z))
plot(f, magG, 'r')
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
#axis([0, 0.5, -100, 10])
grid(True)
xlabel('Normalized frequency ($1 \\rightarrow fs$)')
ylabel('dB')
title('Bandpass NTF/STF Magnitude Response')
f = np.linspace(f0 - 0.3/OSR, f0 + 0.3/OSR)
z = np.exp(2j*np.pi*f)
magH = dbv(evalTF(H, z))
subplot(224)
fstart = -.5
plot(2*OSR*(f - f0), magH)
axis([-0.6, 0.6, -100, -60])
grid(True)
sigma_H = dbv(rmsGain(H, f0 - 0.25/OSR, f0 + 0.25/OSR))
hold(True)
#plot([-0.5, 0.5], sigma_H*np.array([1, 1]))
plot([-0.5, 0.5], sigma_H*np.array([1, 1]), 'o-')
text(-.2, sigma_H + 5, 'rms gain = %5.0fdB' % sigma_H)
xlabel('Normalized frequency offset')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
Further information about NTF synthesis---------------------------------------Please refer to `help(synthesizeNTF)` for detailed - and possibly more updated - documentation!`help(synthesizeNTF)` as of writing: Help on function synthesizeNTF in module deltasigma._synthesizeNTF:**synthesizeNTF(order=3, osr=64, opt=0, H_inf=1.5, f0=0.0)**Synthesize a noise transfer function for a delta-sigma modulator.**Parameters:**order : *int, optional* the order of the modulator, defaults to 3osr : *float, optional* the oversamping ratio, defaults to 64opt : *int or list of floats, optional* flag for optimized zeros, defaults to 0* 0 -> not optimized,* 1 -> optimized,* 2 -> optimized with at least one zero at band-center,* 3 -> optimized zeros (with optimizer)* 4 -> same as 3, but with at least one zero at band-center* [z] -> zero locations in complex formH_inf : *real, optional* max allowed peak value of the NTF. Defaults to 1.5f0 : *real, optional* center frequency for BP modulators, or 0 for LP modulators. Defaults to 0. 1 corresponds to the sampling frequency, so that 0.5 is the maximum value. A value of 0 specifies an LP modulator.**Returns:**ntf : *tuple* noise transfer function in zpk form.**Raises:**ValueError* 'Error. f0 must be less than 0.5' if f0 is out of range* 'Order must be even for a bandpass modulator.' if the order is incompatible with the modulator type.* 'The opt vector must be of length xxx' if opt is used to explicitly pass the NTF zeros and these are in the wrong number.**Warns:*** 'Creating a lowpass ntf.' if the center frequency is different from zero, but so low that a low pass modulator must be designed.* 'Unable to achieve specified H_inf ...' if the desired H_inf cannot be achieved.* 'Iteration limit exceeded' if the routine converges too slowly.**Notes:**This is actually a wrapper function which calls the appropriate versionof synthesizeNTF, based on the module control flag `optimize_NTF` whichdetermines whether to use optimization tools.Parameter ``H_inf`` is used to enforce the Lee stability criterion.**See also:*** `clans()` : Closed-Loop Analysis of Noise-Shaper. An alternative method for selecting NTFs based on the 1-norm of the impulse response of the NTF * `synthesizeChebyshevNTF()` : Select a type-2 highpass Chebyshev NTF.This function does a better job than synthesizeNTF if osr or H_inf is low. System version information
###Code
#%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
%load_ext version_information
%reload_ext version_information
%version_information numpy, scipy, matplotlib, deltasigma
###Output
_____no_output_____
###Markdown
NTF synthesis - demo 1=======================Demonstration of the **`synthesizeNTF`** function, as done in the **MATLAB Delta Sigma Toolbox**, employing its Python port **`deltasigma`**. * The **Noise Transfer Function** (NTF) is synthesized for a **5th-order**, **low-pass** modulator. * The first section deals with an **NTF without optimized zeros** (`opt=0`), * while the second section with an **NTF *with optimized* zeros** (`opt=1`). * Finally the two transfer functions are compared. * Then we move on to the synthesis of an **8th-order band-pass modulator** with optimized zeros. 5th-order modulator-------------------General parameters:
###Code
order = 5
OSR = 32
###Output
_____no_output_____
###Markdown
5th-order modulator: NTF without zeros optimizationThe synthesis of an NTF can be performed with the `synthesizeNTF(order, OSR, opt)` function.We intentionally disable the zeros optimization, setting `opt=0`.
###Code
# Synthesize!
H0 = synthesizeNTF(order, OSR, opt=0)
# 1. Plot the singularities.
subplot(121)
plotPZ(H0, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH0)
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
semilogx(f*2*OSR, magH0)
axis([fstart, 1.2, -100, -30])
grid(True)
sigma_H0 = dbv(rmsGain(H0, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H0*np.array([1, 1]))
semilogx([fstart, 1], sigma_H0*np.array([1, 1]),'-o')
text(0.15, sigma_H0 + 5, 'rms gain = %5.0fdB' % sigma_H0)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
5th-order modulator: NTF *with* zeros optimizationThis time we enable the zeros optimization, setting `opt=1` when calling synthesizeNTF(), then replot the NTF as above.
###Code
# Synthesize again!
H0 = None
H1 = synthesizeNTF(order, OSR, opt=1)
# 1. Plot the singularities.
subplot(121)
plotPZ(H1, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH1 = dbv(evalTF(H1, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH1)
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH1 = dbv(evalTF(H1, z))
semilogx(f*2*OSR, magH1)
axis([fstart, 1.2, -100, -30])
grid(True)
sigma_H1 = dbv(rmsGain(H1, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H1*np.array([1, 1]))
semilogx([fstart, 1], sigma_H1*np.array([1, 1]),'-o')
text(0.15, sigma_H1 + 5, 'RMS gain = %5.0fdB' % sigma_H1)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
5th-order modulator: comparison-------------------------------Overlaid plots follow to ease comparison of the two synthesis approaches.
###Code
# Synthesize!
H0 = synthesizeNTF(order, OSR, opt=0)
H1 = synthesizeNTF(order, OSR, opt=1)
# 1. Plot the singularities.
subplot(121)
# we plot the singularities of the optimized NTF in light
# green with slightly bigger markers so that we can better
# distinguish the two NTF's when overlayed.
plotPZ(H1, markersize=7, color='#90EE90')
hold(True)
plotPZ(H0, markersize=5)
title('NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, 0.75/OSR, 100), np.linspace(0.75/OSR, 0.5, 100)))
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
magH1 = dbv(evalTF(H1, z))
# 2. Plot the magnitude responses.
subplot(222)
plot(f, magH0, label='All zeros in z=1')
hold(True)
plot(f, magH1, label='Optimized zeros')
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
xlabel('Normalized frequency ($1\\rightarrow f_s)$')
ylabel('dB')
legend(loc=4)
title('NTF Magnitude Response')
# 3. Plot the magnitude responses in the signal band.
subplot(224)
fstart = 0.01
f = np.linspace(fstart, 1.2, 200)/(2*OSR)
z = np.exp(2j*np.pi*f)
magH0 = dbv(evalTF(H0, z))
magH1 = dbv(evalTF(H1, z))
semilogx(f*2*OSR, magH0, label='All zeros in z=1')
hold(True)
semilogx(f*2*OSR, magH1, label='Optimized zeros')
axis([fstart, 1.2, -100, -30])
grid(True)
sigma_H0 = dbv(rmsGain(H0, 0, 0.5/OSR))
sigma_H1 = dbv(rmsGain(H1, 0, 0.5/OSR))
#semilogx([fstart, 1], sigma_H0*np.array([1, 1]))
plot([fstart, 1], sigma_H0*np.array([1, 1]), 'o-')
text(0.15, sigma_H0 + 5, 'RMS gain = %5.0fdB' % sigma_H0)
#semilogx([fstart, 1], sigma_H1*np.array([1, 1]))
plot([fstart, 1], sigma_H1*np.array([1, 1]), 'o-')
text(0.15, sigma_H1 + 5, 'RMS gain = %5.0fdB' % sigma_H1)
xlabel('Normalized frequency ($1\\rightarrow f_B$)')
ylabel('dB')
legend(loc=4)
tight_layout()
###Output
_____no_output_____
###Markdown
8th-order bandpass Modulator----------------------------In the following, we synthesize an 8th-order modulator with optimized zeros.
###Code
order = 8
OSR = 64
opt = 2
f0 = 0.125
H = synthesizeNTF(order, OSR, opt, 1.5, f0)
subplot(121)
plotPZ(H)
title('Bandpass NTF Poles and Zeros')
f = np.concatenate((np.linspace(0, f0 - 1./(2.*OSR), 50),
np.linspace(f0 - 1./ (2 * OSR), f0 + 1./(2.*OSR), 100),
np.linspace(f0 + 1./(2.*OSR), 0.5, 50)))
z = np.exp(2j * np.pi * f)
magH = dbv(evalTF(H, z))
subplot(222)
plot(f, magH)
hold(True)
G = (np.zeros((order//2,)), H[1], 1)
k = 1./np.abs(evalTF(G, np.exp(2j*np.pi*f0)))
G = (G[0], G[1], k)
magG = dbv(evalTF(G, z))
plot(f, magG, 'r')
figureMagic([0, 0.5], 0.05, None, [-100, 10], 10, None, (16, 8))
#axis([0, 0.5, -100, 10])
grid(True)
xlabel('Normalized frequency ($1 \\rightarrow fs$)')
ylabel('dB')
title('Bandpass NTF/STF Magnitude Response')
f = np.linspace(f0 - 0.3/OSR, f0 + 0.3/OSR)
z = np.exp(2j*np.pi*f)
magH = dbv(evalTF(H, z))
subplot(224)
fstart = -.5
plot(2*OSR*(f - f0), magH)
axis([-0.6, 0.6, -100, -60])
grid(True)
sigma_H = dbv(rmsGain(H, f0 - 0.25/OSR, f0 + 0.25/OSR))
hold(True)
#plot([-0.5, 0.5], sigma_H*np.array([1, 1]))
plot([-0.5, 0.5], sigma_H*np.array([1, 1]), 'o-')
text(-.2, sigma_H + 5, 'rms gain = %5.0fdB' % sigma_H)
xlabel('Normalized frequency offset')
ylabel('dB')
tight_layout()
###Output
_____no_output_____
###Markdown
Further information about NTF synthesis---------------------------------------Please refer to `help(synthesizeNTF)` for detailed - and possibly more updated - documentation!`help(synthesizeNTF)` as of writing: Help on function synthesizeNTF in module deltasigma._synthesizeNTF:**synthesizeNTF(order=3, osr=64, opt=0, H_inf=1.5, f0=0.0)**Synthesize a noise transfer function for a delta-sigma modulator.**Parameters:**order : *int, optional* the order of the modulator, defaults to 3osr : *float, optional* the oversamping ratio, defaults to 64opt : *int or list of floats, optional* flag for optimized zeros, defaults to 0* 0 -> not optimized,* 1 -> optimized,* 2 -> optimized with at least one zero at band-center,* 3 -> optimized zeros (with optimizer)* 4 -> same as 3, but with at least one zero at band-center* [z] -> zero locations in complex formH_inf : *real, optional* max allowed peak value of the NTF. Defaults to 1.5f0 : *real, optional* center frequency for BP modulators, or 0 for LP modulators. Defaults to 0. 1 corresponds to the sampling frequency, so that 0.5 is the maximum value. A value of 0 specifies an LP modulator.**Returns:**ntf : *tuple* noise transfer function in zpk form.**Raises:**ValueError* 'Error. f0 must be less than 0.5' if f0 is out of range* 'Order must be even for a bandpass modulator.' if the order is incompatible with the modulator type.* 'The opt vector must be of length xxx' if opt is used to explicitly pass the NTF zeros and these are in the wrong number.**Warns:*** 'Creating a lowpass ntf.' if the center frequency is different from zero, but so low that a low pass modulator must be designed.* 'Unable to achieve specified H_inf ...' if the desired H_inf cannot be achieved.* 'Iteration limit exceeded' if the routine converges too slowly.**Notes:**This is actually a wrapper function which calls the appropriate versionof synthesizeNTF, based on the module control flag `optimize_NTF` whichdetermines whether to use optimization tools.Parameter ``H_inf`` is used to enforce the Lee stability criterion.**See also:*** `clans()` : Closed-Loop Analysis of Noise-Shaper. An alternative method for selecting NTFs based on the 1-norm of the impulse response of the NTF * `synthesizeChebyshevNTF()` : Select a type-2 highpass Chebyshev NTF.This function does a better job than synthesizeNTF if osr or H_inf is low. System version information
###Code
#%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
%load_ext version_information
%reload_ext version_information
%version_information numpy, scipy, matplotlib, deltasigma
###Output
The version_information extension is already loaded. To reload it, use:
%reload_ext version_information
|
note/convergency.ipynb | ###Markdown
Test the convergence of the sampling chains* Using three different approaches 1. K-L divergence 2. Autocorrelation time 3. Gelman-Rubin test
###Code
model_dir = '../model/'
model_str = 'default'
# The configuration file
config_file = os.path.join(model_dir, 'asap_final_%s.yaml' % model_str)
# The results of the final sampling process
result_file = os.path.join(model_dir, 'asap_final_%s_sample.npz' % model_str)
# Initialize the model, load the data
cfg, params, obs_data, um_data = fitting.initial_model(config_file, verbose=True)
# Load in the final sampling results
(mod_result_samples,
mod_result_chains,
mod_result_lnprob,
mod_result_best, _, _) = io.load_npz_results(result_file)
# The results from the 3-stage burn-in results
burnin_file_1 = os.path.join(model_dir, 'asap_final_%s_burnin_1.npz' % model_str)
burnin_file_2 = os.path.join(model_dir, 'asap_final_%s_burnin_2.npz' % model_str)
burnin_file_3 = os.path.join(model_dir, 'asap_final_%s_burnin_3.npz' % model_str)
# Load the burn-in results
(mod_burnin_samples_1,
mod_burnin_chains_1,
mod_burnin_lnprob_1,
mod_burnin_best_1, _, _) = io.load_npz_results(burnin_file_1)
(mod_burnin_samples_2,
mod_burnin_chains_2,
mod_burnin_lnprob_2,
mod_burnin_best_2, _, _) = io.load_npz_results(burnin_file_2)
(mod_burnin_samples_3,
mod_burnin_chains_3,
mod_burnin_lnprob_3,
mod_burnin_best_3, _, _) = io.load_npz_results(burnin_file_3)
mod_burnin_chains = np.concatenate([
mod_burnin_chains_1, mod_burnin_chains_2, mod_burnin_chains_3], axis=1)
_, n_step, n_dim = mod_result_chains.shape
mod_result_best = np.nanmean(
mod_result_chains[:, -int(n_step * 0.1):, :].reshape([-1, n_dim]), axis=0)
print("\n# Best model parameter:", mod_result_best)
parameters = mod_result_best
# Predict the stellar mass in inner and outer apertures
logms_inn, logms_tot, sig_logms, mask_use = predict_mstar_basic(
um_data['um_mock'], parameters, min_logms=10.5,
logmh_col=cfg['um']['logmh_col'], min_scatter=cfg['um']['min_scatter'],
pivot=cfg['um']['pivot_logmh'])
# Predict the SMFs and DeltaSigma profiles
um_smf_tot, um_smf_inn, um_dsigma = make_model_predictions(
mod_result_best, cfg, obs_data, um_data)
# Check the likelihood for SMF and DeltaSigma profiles
lnlike_smf, lnlike_dsigma = ln_likelihood(
mod_result_best, cfg, obs_data, um_data, sep_return=True)
# The useful part of the mock catalog
um_mock_use = um_data['um_mock'][mask_use]
print("\n# Best parameters: ", mod_result_best)
print("# ln(Likelihood) for SMFs : %8.4f" % lnlike_smf)
print("# ln(Likelihood) for DSigma : %8.4f" % lnlike_dsigma)
# Old result:
# -155.79388568381947 [ 6.03179566e-01 1.18461568e+01 -1.65986740e-02 2.63625215e-03
# 6.52983173e-01 -1.77764693e-01 3.77075453e-01]
###Output
_____no_output_____
###Markdown
Estimate the auto-correlation time* References: 1. [Autocorrelation analysis & convergence](https://emcee.readthedocs.io/en/latest/tutorials/autocorr/) 2. [Autocorrelation time: is taking the mean the right thing to do?](https://github.com/dfm/emcee/issues/209) 3. [A Smarter Autocorrelation Time](https://github.com/dfm/emcee/issues/214)
###Code
tau = np.mean(
[integrated_time(walker, c=5, tol=5, quiet=False) for walker in mod_burnin_chains_3[:, :, :]], axis=0)
print(tau)
tau = np.mean(
[integrated_time(walker, c=5, tol=5) for walker in mod_result_chains[:, 1000:, :]], axis=0)
print(tau)
###Output
[812.14676696]
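###Markdown
As a rough rule of thumb, the post burn-in chain should span at least a few tens of autocorrelation times. The cell below is an added illustration (not part of the original analysis) that reuses the `tau` estimate and chain array from above to check this.
###Code
# Added illustration: compare the post burn-in chain length with tau and
# estimate a rough effective sample size (total kept samples / tau).
n_walkers, n_steps, n_dim = mod_result_chains.shape
n_post = n_steps - 1000                      # steps kept after discarding burn-in, as above
print("# Chain length in units of tau      :", n_post / tau)
print("# Approximate effective sample size :", n_walkers * n_post / tau)
###Output
_____no_output_____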
###Markdown
Gelman-Rubin Test* Reference: 1. [Convergence of the Affine Invariant Ensemble Sampler](http://joergdietrich.github.io/emcee-convergence.html) 2. [The Gelman Rubin Statistic and emcee](http://greg-ashton.physics.monash.edu/the-gelman-rubin-statistic-and-emcee.html) * Note: you should not compute the G–R statistic using multiple chains in the same emcee ensemble because the chains are not independent!
###Code
convergence.gelman_rubin(mod_result_chains[:, 1000:, ])
###Output
_____no_output_____
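###Markdown
For reference, the statistic itself is straightforward to compute by hand: it compares the variance between chains with the variance within each chain. The cell below is an added sketch assuming `mod_result_chains` is shaped `(n_walkers, n_steps, n_dim)`; per the note above, walkers from a single emcee ensemble are not independent chains, so treat the numbers as indicative only.
###Code
# Added illustration: Gelman-Rubin R-hat computed directly from the chains.
def gelman_rubin_rhat(chains):
    """Rough R-hat for an array shaped (n_chains, n_steps, n_dim)."""
    n = chains.shape[1]
    chain_means = chains.mean(axis=1)               # per-chain means
    W = chains.var(axis=1, ddof=1).mean(axis=0)     # within-chain variance
    B = n * chain_means.var(axis=0, ddof=1)         # between-chain variance
    var_hat = (n - 1.) / n * W + B / n              # pooled variance estimate
    return np.sqrt(var_hat / W)

print(gelman_rubin_rhat(mod_result_chains[:, 1000:, :]))
###Output
_____no_output_____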
###Markdown
Performs a Kullback-Leibler divergence test for convergence* Reference 1. [Convergency test in `prospector`](https://github.com/bd-j/prospector/blob/master/prospect/fitting/convergence.py)
###Code
pass_test, kl_test = convergence.convergence_check(
mod_result_chains[:, 1000:, :], convergence_check_interval=100, convergence_chunks=100,
convergence_stable_points_criteria=3, convergence_nhist=20,
convergence_kl_threshold=0.018)
if pass_test:
print("# Pass convergence test using K-L divergence")
else:
print("# Doest not pass convergence test using K-L divergence")
###Output
# Pass convergence test using K-L divergence
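###Markdown
The idea behind the K-L check can also be illustrated directly: histogram an early and a late chunk of one parameter's chain on a common grid and measure how different the two distributions are. This is an added sketch only (it assumes `scipy` is available); the real multi-parameter test with stability criteria is the `convergence.convergence_check` call above.
###Code
# Added illustration of the K-L divergence idea for a single parameter.
from scipy.stats import entropy

chains_p0 = mod_result_chains[:, 1000:, 0]       # first parameter, post burn-in
half = chains_p0.shape[1] // 2
early = chains_p0[:, :half].ravel()
late = chains_p0[:, half:].ravel()

bin_edges = np.histogram(chains_p0, bins=20)[1]  # common binning for both chunks
p, _ = np.histogram(early, bins=bin_edges)
q, _ = np.histogram(late, bins=bin_edges)
kl = entropy(p + 1e-12, q + 1e-12)               # small offset avoids empty bins
print("# K-L divergence between early and late chunks: %.4f" % kl)
###Output
_____no_output_____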
###Markdown
Corner and trace plot
###Code
params_label = [r'$a$', r'$b$', r'$c$', r'$d$',
r'$f_{\rm ins}$', r'$A_{\rm exs}$', r'$B_{\rm exs}$']
params_range = [(0.585, 0.622), (11.831, 11.854),
(-0.024, 0.007), (-0.005, 0.0039),
(0.629, 0.679),
(-0.22, -0.162), (0.29, 0.41)]
title_fmt = '.3f'
mod_samples_use = mod_result_chains[:, 100:, :].reshape([-1, 7])
mod_corner = plotting.plot_mcmc_corner(
mod_samples_use, params_label, truths=mod_result_best, truth_color='skyblue',
**{'title_fmt': title_fmt, 'ranges': params_range, 'plot_datapoints': False})
mod_trace = plotting.plot_mcmc_trace(
mod_result_chains, params_label,
mcmc_best=mod_result_best, mcmc_burnin=mod_burnin_chains,
burnin_alpha=0.15, trace_alpha=0.12)
fig_2 = plt.figure(figsize=(7.5, 7))
fig_2.subplots_adjust(left=0.16, right=0.995, bottom=0.13, top=0.995, wspace=0.00, hspace=0.00)
ax1 = fig_2.add_subplot(111)
ax1.grid(linestyle='--', linewidth=2, alpha=0.4, zorder=0)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(25)
# These are the SMFs and stellar mass data
obs_smf_tot = obs_data['smf_tot']
obs_smf_inn = obs_data['smf_inn']
obs_logms_tot = obs_data['mtot']
obs_logms_inn = obs_data['minn']
obs_smf_full = obs_data['smf_full']
# This is the model SMF with broader mass range
um_smf_tot_all = smf.get_smf_bootstrap(logms_tot, cfg['um']['volume'], 18, 11.0, 12.5, n_boots=1)
# Show the PRIMUS SMF
ax1.errorbar(obs_smf_full['logm_mean'][6:] + 0.17,
np.log10(obs_smf_full['smf'][6:]),
(np.log10(obs_smf_full['smf_upp'][6:]) - np.log10(obs_smf_full['smf'][6:])),
fmt='o', color='seagreen', ecolor='seagreen', alpha=0.9, marker='s', markersize=9,
label=r'$\mathrm{PRIMUS}$', zorder=0)
# Show the HSC SMFs
ax1.fill_between(obs_smf_tot['logm_mean'], np.log10(obs_smf_tot['smf_low']), np.log10(obs_smf_tot['smf_upp']),
facecolor='steelblue', edgecolor='none', interpolate=True, alpha=0.3,
label=r'$\mathrm{Data:\ Mtot}$')
ax1.fill_between(obs_smf_inn['logm_mean'], np.log10(obs_smf_inn['smf_low']), np.log10(obs_smf_inn['smf_upp']),
facecolor='lightsalmon', edgecolor='none', interpolate=True, alpha=0.3,
label=r'$\mathrm{Data:\ Minn}$')
ax1.scatter(obs_smf_inn['logm_mean'], np.log10(obs_smf_inn['smf']),
marker='h', c='r', s=60, label='__no_label__', alpha=0.3)
ax1.scatter(obs_smf_tot['logm_mean'], np.log10(obs_smf_tot['smf']),
marker='8', c='b', s=60, label='__no_label__', alpha=0.3)
# Predicted SMF
ax1.plot(obs_smf_inn['logm_mean'], np.log10(um_smf_inn),
linewidth=4, linestyle='-.', c='salmon', alpha=1.0, label=r'$\mathrm{UM:\ Minn}$')
ax1.plot(um_smf_tot_all['logm_mean'][2:], np.log10(um_smf_tot_all['smf'][2:]),
linewidth=4, linestyle='--', c='royalblue', alpha=0.8, label='__no_label__')
ax1.plot(obs_smf_tot['logm_mean'], np.log10(um_smf_tot),
linewidth=4, linestyle='-', c='royalblue', alpha=1.0, label=r'$\mathrm{UM:\ Mtot}$')
ax1.legend(fontsize=18, loc='upper right')
ax1.set_xlabel(r'$\log (M_{\star}/M_{\odot})$', fontsize=30)
ax1.set_ylabel((r'$\mathrm{d}N/\mathrm{d}\log M_{\star}\ $'
r'$[{\mathrm{Mpc}^{-3}}{\mathrm{dex}^{-1}}]$'), size=30)
_ = ax1.set_xlim(10.75, 12.29)
_ = ax1.set_ylim(-6.99, -2.09)
um_mhalo_tuple = predict_mhalo(
obs_data['wl_dsigma'], um_data['um_mock'][mask_use], logms_tot, logms_inn)
mod_dsig = plotting.plot_dsigma_profiles(
obs_data['wl_dsigma'], um_dsigma, um_mhalo=um_mhalo_tuple, reference=0)
###Output
_____no_output_____ |
SentimentAnalysis/Embedding/LabelEncoder.ipynb | ###Markdown
LabelEncoder
###Code
cities = ['London', 'Berlin', 'Berlin', 'New York', 'London']
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
city_labels = encoder.fit_transform(cities)
city_labels
###Output
_____no_output_____
###Markdown
OneHotEncoder
###Code
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = OneHotEncoder(sparse=False)
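# OneHotEncoder expects a 2-D input of shape (n_samples, n_features), hence the reshape into a single column below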
city_labels = city_labels.reshape((5, 1))
one_hot_encoder.fit_transform(city_labels)
###Output
_____no_output_____ |
machine-learning-project-walkthrough/machine-learning-project-walkthrough/Machine Learning Project Part 2.ipynb | ###Markdown
Introduction: Machine Learning Project Part 2In this series of notebooks, we are working on a supervised, regression machine learning problem. Using real-world New York City building energy data, we want to predict the Energy Star Score of a building and determine the factors that influence the score.We are using the general outline of the machine learning pipeline to structure our project:1. Data cleaning and formatting2. Exploratory data analysis3. Feature engineering and selection4. Compare several machine learning models on a performance metric5. Perform hyperparameter tuning on the best model to optimize it for the problem6. Evaluate the best model on the testing set7. Interpret the model results to the extent possible8. Draw conclusions and write a well-documented reportThe first notebook covered steps 1-3, and in this notebook, we will cover 4-6. In this series, I focus more on the implementations rather than the details, and for those looking for more background into the machine learning methods, I recommend [Hands-On Machine Learning with Scikit-Learn and Tensorflow](http://shop.oreilly.com/product/0636920052289.do) by Aurelien Geron. This is an excellent resource for the basic theory behind the algorithms and how to use them effectively in Python! Imports We will use the standard data science and machine learning libraries in this project.
###Code
# Pandas and numpy for data manipulation
import pandas as pd
import numpy as np
# No warnings about setting value on copy of slice
pd.options.mode.chained_assignment = None
pd.set_option('display.max_columns', 60)
# Matplotlib for visualization
import matplotlib.pyplot as plt
%matplotlib inline
# Set default font size
plt.rcParams['font.size'] = 24
from IPython.core.pylabtools import figsize
# Seaborn for visualization
import seaborn as sns
sns.set(font_scale = 2)
# Imputing missing values and scaling values
from sklearn.preprocessing import Imputer, MinMaxScaler
# Machine Learning Models
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
# Hyperparameter tuning
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
###Output
_____no_output_____
###Markdown
Read in DataFirst let's read in the formatted data from the previous notebook.
###Code
# Read in data into dataframes
train_features = pd.read_csv('data/training_features.csv')
test_features = pd.read_csv('data/testing_features.csv')
train_labels = pd.read_csv('data/training_labels.csv')
test_labels = pd.read_csv('data/testing_labels.csv')
# Display sizes of data
print('Training Feature Size: ', train_features.shape)
print('Testing Feature Size: ', test_features.shape)
print('Training Labels Size: ', train_labels.shape)
print('Testing Labels Size: ', test_labels.shape)
###Output
Training Feature Size: (6622, 64)
Testing Feature Size: (2839, 64)
Training Labels Size: (6622, 1)
Testing Labels Size: (2839, 1)
###Markdown
As a reminder, here is what the formatted data looks like. In the first notebook, we engineered a number of features by taking the natural log of the variables, included two categorical variables, and selected a subset of features by removing highly collinear features.
###Code
train_features.head(12)
###Output
_____no_output_____
###Markdown
The `score` column contains the Energy Star Score, the target for our machine learning problem. The Energy Star Score is supposed to be a comparative measurement of the energy efficiency of a building, although we saw there may be issues with how this is calculated in part one! Here's the distribution of the Energy Star Score.
###Code
figsize(8, 8)
# Histogram of the Energy Star Score
plt.style.use('fivethirtyeight')
plt.hist(train_labels['score'].dropna(), bins = 100);
plt.xlabel('Score'); plt.ylabel('Number of Buildings');
plt.title('ENERGY Star Score Distribution');
###Output
_____no_output_____
###Markdown
Evaluating and Comparing Machine Learning ModelsIn this section we will build, train, and evaluate several machine learning methods for our supervised regression task. The objective is to determine which model holds the most promise for further development (such as hyperparameter tuning). We are comparing models using the __mean absolute error__. A baseline model that guessed the median value of the score was off by an __average of 25 points__. Imputing Missing Values Standard machine learning models cannot deal with missing values, which means we have to find a way to fill these in or discard any features with missing values. Since we already removed features with more than 50% missing values in the first part, here we will focus on filling in these missing values, a process known as [imputation](https://en.wikipedia.org/wiki/Imputation_(statistics)). There are a number of methods for imputation but here we will use the relatively simple method of replacing missing values with the median of the column. ([Here is a more thorough discussion on imputing missing values](http://www.stat.columbia.edu/~gelman/arm/missing.pdf))In the code below, we create a [Scikit-learn `Imputer` object](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html) to fill in missing values with the median of the column. Notice that we train the imputer (using the `Imputer.fit` method) on the training data but not the testing data. We then transform (using `Imputer.transform`) both the training data and testing data. This means that the missing values in the testing set are filled in with the median value of the corresponding columns in the training set. [We have to do it this way](https://stackoverflow.com/a/46692001) rather than fitting the imputer on all of the data, because at deployment time, we will have to impute the missing values in new observations based on the previous training data. This is one way to avoid the problem known as [data leakage](https://www.kaggle.com/dansbecker/data-leakage) where information from the testing set "leaks" into the training process.
###Code
# Create an imputer object with a median filling strategy
imputer = Imputer(strategy='median')
# Train on the training features
imputer.fit(train_features)
# Transform both training data and testing data
X = imputer.transform(train_features)
X_test = imputer.transform(test_features)
print('Missing values in training features: ', np.sum(np.isnan(X)))
print('Missing values in testing features: ', np.sum(np.isnan(X_test)))
# Make sure all values are finite
print(np.where(~np.isfinite(X)))
print(np.where(~np.isfinite(X_test)))
###Output
(array([], dtype=int64), array([], dtype=int64))
(array([], dtype=int64), array([], dtype=int64))
###Markdown
After imputation, all of the features are real-valued. For more sophisticated methods of imputation (although median values usually works well) check out [this article](https://www.tandfonline.com/doi/full/10.1080/1743727X.2014.979146) Scaling FeaturesThe final step to take before we can build our models is to [scale the features](https://en.wikipedia.org/wiki/Feature_scaling). This is necessary because features are in different units, and we want to normalize the features so the units do not affect the algorithm. [Linear Regression and Random Forest do not require feature scaling](https://stats.stackexchange.com/questions/121886/when-should-i-apply-feature-scaling-for-my-data), but other methods, such as support vector machines and k nearest neighbors, do require it because they take into account the Euclidean distance between observations. For this reason, it is a best practice to scale features when we are comparing multiple algorithms. There are two ways to [scale features](http://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html):1. For each value, subtract the mean of the feature and divide by the standard deviation of the feature. This is known as standardization and results in each feature having a mean of 0 and a standard deviation of 1.2. For each value, subtract the minimum value of the feature and divide by the maximum minus the minimum for the feature (the range). This assures that all the values for a feature are between 0 and 1 and is called scaling to a range or normalization.Here is a good article about [normalization and standardization](https://machinelearningmastery.com/normalize-standardize-machine-learning-data-weka/).As with imputation, when we train the scaling object, we want to use only the training set. When we transform features, we will transform both the training set and the testing set.
###Code
# Create the scaler object with a range of 0-1
scaler = MinMaxScaler(feature_range=(0, 1))
# Fit on the training data
scaler.fit(X)
# Transform both the training and testing data
X = scaler.transform(X)
X_test = scaler.transform(X_test)
# Convert y to one-dimensional array (vector)
y = np.array(train_labels).reshape((-1, ))
y_test = np.array(test_labels).reshape((-1, ))
###Output
_____no_output_____
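###Markdown
As an aside, the first option described above (standardization) would be an almost drop-in replacement for the `MinMaxScaler` step. The cell below is only an added sketch and is not used in the rest of this notebook; in practice it would be fit on the imputed, unscaled training features, and new variable names are used so the arrays above are left untouched.
###Code
# Sketch of the standardization alternative: zero mean and unit variance per feature.
from sklearn.preprocessing import StandardScaler

std_scaler = StandardScaler()
std_scaler.fit(X)                         # fit on the training data only, as before
X_std = std_scaler.transform(X)
X_test_std = std_scaler.transform(X_test)
###Output
_____no_output_____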
###Markdown
Models to EvaluateWe will compare five different machine learning models using the great [Scikit-Learn library](http://scikit-learn.org/stable/):1. Linear Regression2. Support Vector Machine Regression3. Random Forest Regression4. Gradient Boosting Regression5. K-Nearest Neighbors RegressionAgain, here I'm focusing on implementation rather than explaining how these work. In addition to Hands-On Machine Learning, another great resource (this one is free online) for reading about machine learning models is [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/).To compare the models, we are going to be mostly using the Scikit-Learn defaults for the model hyperparameters. Generally these will perform decently, but should be optimized before actually using a model. At first, we just want to determine the baseline performance of each model, and then we can select the best performing model for further optimization using hyperparameter tuning. Remember that the default hyperparameters will get a model up and running, but nearly always should be adjusted using some sort of search to find the best settings for your problem! Here is what the Scikit-learn documentation [says about the defaults](https://arxiv.org/abs/1309.0238): __Sensible defaults__: Whenever an operation requires a user-defined parameter, an appropriate default value is defined by the library. The default value should cause the operation to be performed in a sensible way (giving a baseline solution for the task at hand.)One of the best parts about scikit-learn is that all models are implemented in an identical manner: once you know how to build one, you can implement an extremely diverse array of models. Here we will implement the entire training and testing procedures for a number of models in just a few lines of code.
###Code
# Function to calculate mean absolute error
def mae(y_true, y_pred):
return np.mean(abs(y_true - y_pred))
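
# Added illustration: reproduce the naive median-guess baseline mentioned above,
# i.e. a "model" that always predicts the median training score.
baseline_guess = np.median(y)
print('Baseline guess (median score): %0.2f' % baseline_guess)
print('Baseline MAE on the test set:  %0.2f' % mae(y_test, baseline_guess))
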
# Takes in a model, trains the model, and evaluates the model on the test set
def fit_and_evaluate(model):
# Train the model
model.fit(X, y)
# Make predictions and evalute
model_pred = model.predict(X_test)
model_mae = mae(y_test, model_pred)
# Return the performance metric
return model_mae
lr = LinearRegression()
lr_mae = fit_and_evaluate(lr)
print('Linear Regression Performance on the test set: MAE = %0.4f' % lr_mae)
svm = SVR(C = 1000, gamma = 0.1)
svm_mae = fit_and_evaluate(svm)
print('Support Vector Machine Regression Performance on the test set: MAE = %0.4f' % svm_mae)
random_forest = RandomForestRegressor(random_state=60)
random_forest_mae = fit_and_evaluate(random_forest)
print('Random Forest Regression Performance on the test set: MAE = %0.4f' % random_forest_mae)
gradient_boosted = GradientBoostingRegressor(random_state=60)
gradient_boosted_mae = fit_and_evaluate(gradient_boosted)
print('Gradient Boosted Regression Performance on the test set: MAE = %0.4f' % gradient_boosted_mae)
knn = KNeighborsRegressor(n_neighbors=10)
knn_mae = fit_and_evaluate(knn)
print('K-Nearest Neighbors Regression Performance on the test set: MAE = %0.4f' % knn_mae)
plt.style.use('fivethirtyeight')
figsize(8, 6)
# Dataframe to hold the results
model_comparison = pd.DataFrame({'model': ['Linear Regression', 'Support Vector Machine',
'Random Forest', 'Gradient Boosted',
'K-Nearest Neighbors'],
'mae': [lr_mae, svm_mae, random_forest_mae,
gradient_boosted_mae, knn_mae]})
# Horizontal bar chart of test mae
model_comparison.sort_values('mae', ascending = False).plot(x = 'model', y = 'mae', kind = 'barh',
color = 'red', edgecolor = 'black')
# Plot formatting
plt.ylabel(''); plt.yticks(size = 14); plt.xlabel('Mean Absolute Error'); plt.xticks(size = 14)
plt.title('Model Comparison on Test MAE', size = 20);
###Output
_____no_output_____
###Markdown
Depending on the run (the exact results change slightly each time), the gradient boosting regressor performs the best followed by the random forest. I have to admit that this is not the fairest comparison because we are using mostly the default hyperparameters. Especially with the Support Vector Regressor, the hyperparameters have a significant influence on performance. (the random forest and gradient boosting methods are great for starting out because the performance is less dependent on the model settings). Nonetheless, from these results, we can conclude that machine learning is applicable because all the models significantly outperform the baseline!From here, I am going to concentrate on optimizing the best model using hyperparameter tuning. Given the results here, I will concentrate on using the `GradientBoostingRegressor`. This is the Scikit-Learn implementation of [Gradient Boosted Trees](http://www.ccs.neu.edu/home/vip/teach/MLcourse/4_boosting/slides/gradient_boosting.pdf) which has won many [Kaggle competitions](http://matthewemery.ca/Why-Kagglers-Love-XGBoost/) in the past few years. The Scikit-Learn version is generally slower than the `XGBoost` version, but here we'll stick to Scikit-Learn because the syntax is more familiar. [Here's a guide](https://www.kaggle.com/dansbecker/learning-to-use-xgboost/code) to using the implementation in the `XGBoost` package. Model Optimization In machine learning, optimizing a model means finding the best set of hyperparameters for a particular problem. HyperparametersFirst off, we need to understand what [model hyperparameters are in contrast to model parameters](https://machinelearningmastery.com/difference-between-a-parameter-and-a-hyperparameter/) :* Model __hyperparameters__ are best thought of as settings for a machine learning algorithm that are tuned by the data scientist before training. Examples would be the number of trees in the random forest, or the number of neighbors used in K Nearest Neighbors Regression. * Model __parameters__ are what the model learns during training, such as the weights in the linear regression.We as data scientists control a model by choosing the hyperparameters, and these choices can have a significant effect on the final performance of the model (although usually not as great of an effect as getting more data or engineering features). [Tuning the model hyperparameters](http://scikit-learn.org/stable/modules/grid_search.html) controls the balance of under vs over fitting in a model. We can try to correct for under-fitting by making a more complex model, such as using more trees in a random forest or more layers in a deep neural network. A model that underfits has high bias, and occurs when our model does not have enough capacity (degrees of freedom) to learn the relationship between the features and the target. We can try to correct for overfitting by limiting the complexity of the model and applying regularization. This might mean decreasing the degree of a polynomial regression, or adding dropout layers to a deep neural network. A model that overfits has high variance and in effect has memorized the training set. Both underfitting and overfitting lead to poor generalization performance on the test set. The problem with choosing the hyperparameters is that no set will work best across all problems. Therefore, for every new dataset, we have to find the best settings. This can be a time-consuming process, but luckily there are several options for performing this procedure in Scikit-Learn. 
Even better, new libraries, such as [TPOT](https://epistasislab.github.io/tpot/) by epistasis labs, are aiming to do this process automatically for you! For now, we will stick to doing this manually (sort of) in Scikit-Learn, but stay tuned for an article on automatic model selection! Hyperparameter Tuning with Random Search and Cross ValidationWe can choose the best hyperparameters for a model through random search and cross validation. * Random search refers to the method in which we choose hyperparameters to evaluate: we define a range of options, and then randomly select combinations to try. This is in contrast to grid search which evaluates every single combination we specify. Generally, random search is better when we have limited knowledge of the best model hyperparameters and we can use random search to narrow down the options and then use grid search with a more limited range of options. * Cross validation is the method used to assess the performance of the hyperparameters. Rather than splitting the training set up into separate training and validation sets which reduces the amount of training data we can use, we use K-Fold Cross Validation. This means dividing the training data into K folds, and then going through an iterative process where we first train on K-1 of the folds and then evaluate performance on the kth fold. We repeat this process K times so eventually we will have tested on every example in the training data, with the key point that in each iteration we are testing on data that we __did not train on__. At the end of K-fold cross validation, we take the average error on each of the K iterations as the final performance measure and then train the model on all the training data at once. The performance we record is then used to compare different combinations of hyperparameters. A picture of k-fold cross validation using k = 5 is shown below:Here we will implement random search with cross validation to select the optimal hyperparameters for the gradient boosting regressor. We first define a grid then perform an iterative process of: randomly sample a set of hyperparameters from the grid, evaluate the hyperparameters using 4-fold cross-validation, and then select the hyperparameters with the best performance. Of course we don't actually do this iteration ourselves, we let Scikit-Learn and `RandomizedSearchCV` do the process for us!
###Code
# Loss function to be optimized
loss = ['ls', 'lad', 'huber']
# Number of trees used in the boosting process
n_estimators = [100, 500, 900, 1100, 1500]
# Maximum depth of each tree
max_depth = [2, 3, 5, 10, 15]
# Minimum number of samples per leaf
min_samples_leaf = [1, 2, 4, 6, 8]
# Minimum number of samples to split a node
min_samples_split = [2, 4, 6, 10]
# Maximum number of features to consider for making splits
max_features = ['auto', 'sqrt', 'log2', None]
# Define the grid of hyperparameters to search
hyperparameter_grid = {'loss': loss,
'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
###Output
_____no_output_____
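###Markdown
To make the k-fold idea above concrete before launching the full search, a single candidate setting can be scored with 4-fold cross validation directly. This is a small added sketch with arbitrary hyperparameter values; the random search below automates the same evaluation over many sampled combinations.
###Code
from sklearn.model_selection import cross_val_score

# Score one arbitrary candidate model with 4-fold cross validation (illustration only)
candidate = GradientBoostingRegressor(n_estimators=500, max_depth=5, random_state=42)
cv_scores = cross_val_score(candidate, X, y, cv=4, scoring='neg_mean_absolute_error')
print('4-fold CV MAE: %0.4f +/- %0.4f' % (-cv_scores.mean(), cv_scores.std()))
###Output
_____no_output_____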
###Markdown
We selected 6 different hyperparameters to tune in the gradient boosting regressor. These all will affect the model in different ways that are hard to determine ahead of time, and the only method for finding the best combination for a specific problem is to test them out! To read about the hyperparameters, I suggest taking a look at the [Scikit-Learn documentation](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html#sklearn.ensemble.GradientBoostingRegressor). For now, just know that we are trying to find the best combination of hyperparameters and because there is no theory to tell us which will work best, we just have to evaluate them, like running an experiment! In the code below, we create the Randomized Search Object passing in the following parameters:* `estimator`: the model* `param_distributions`: the distribution of parameters we defined* `cv`: the number of folds to use for k-fold cross validation* `n_iter`: the number of different combinations to try* `scoring`: which metric to use when evaluating candidates* `n_jobs`: number of cores to run in parallel (-1 will use all available)* `verbose`: how much information to display (1 displays a limited amount) * `return_train_score`: return the training score for each cross-validation fold* `random_state`: fixes the random number generator used so we get the same results every runThe Randomized Search Object is trained the same way as any other scikit-learn model. After training, we can compare all the different hyperparameter combinations and find the best performing one.
###Code
# Create the model to use for hyperparameter tuning
model = GradientBoostingRegressor(random_state = 42)
# Set up the random search with 4-fold cross validation
random_cv = RandomizedSearchCV(estimator=model,
param_distributions=hyperparameter_grid,
cv=4, n_iter=25,
scoring = 'neg_mean_absolute_error',
n_jobs = -1, verbose = 1,
return_train_score = True,
random_state=42)
# Fit on the training data
random_cv.fit(X, y)
###Output
Fitting 4 folds for each of 25 candidates, totalling 100 fits
###Markdown
Scikit-learn uses the negative mean absolute error for evaluation because it wants a metric to maximize. Therefore, a better score will be closer to 0. We can get the results of the randomized search into a dataframe, and sort the values by performance.
###Code
# Get all of the cv results and sort by the test performance
random_results = pd.DataFrame(random_cv.cv_results_).sort_values('mean_test_score', ascending = False)
random_results.head(10)
random_cv.best_estimator_
###Output
_____no_output_____
###Markdown
The best gradient boosted model has the following hyperparameters:* `loss = lad`* `n_estimators = 500`* `max_depth = 5`* `min_samples_leaf = 6`* `min_samples_split = 6`* `max_features = None` (This means that `max_features = n_features` according to the docs)Using random search is a good method to narrow down the possible hyperparameters to try. Initially, we had no idea which combination would work the best, but this at least narrows down the range of options.We could use the random search results to inform a grid search by creating a grid with hyperparameters close to those that worked best during the randomized search. However, rather than evaluating all of these settings again, I will focus on a single one, the number of trees in the forest (`n_estimators`). By varying only one hyperparameter, we can directly observe how it affects performance. In the case of the number of trees, we would expect to see a significant effect on the amount of under vs overfitting.Here we will use grid search with a grid that only has the `n_estimators` hyperparameter. We will evaluate a range of trees then plot the training and testing performance to get an idea of what increasing the number of trees does for our model. We will fix the other hyperparameters at the best values returned from random search to isolate the number of trees effect.
###Code
# Create a range of trees to evaluate
trees_grid = {'n_estimators': [100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800]}
model = GradientBoostingRegressor(loss = 'lad', max_depth = 5,
min_samples_leaf = 6,
min_samples_split = 6,
max_features = None,
random_state = 42)
# Grid Search Object using the trees range and the random forest model
grid_search = GridSearchCV(estimator = model, param_grid=trees_grid, cv = 4,
scoring = 'neg_mean_absolute_error', verbose = 1,
n_jobs = -1, return_train_score = True)
# Fit the grid search
grid_search.fit(X, y)
# Get the results into a dataframe
results = pd.DataFrame(grid_search.cv_results_)
# Plot the training and testing error vs number of trees
figsize(8, 8)
plt.style.use('fivethirtyeight')
plt.plot(results['param_n_estimators'], -1 * results['mean_test_score'], label = 'Testing Error')
plt.plot(results['param_n_estimators'], -1 * results['mean_train_score'], label = 'Training Error')
plt.xlabel('Number of Trees'); plt.ylabel('Mean Abosolute Error'); plt.legend();
plt.title('Performance vs Number of Trees');
results.sort_values('mean_test_score', ascending = False).head(5)
###Output
_____no_output_____
###Markdown
From this plot, it's pretty clear that our model is [overfitting](https://elitedatascience.com/overfitting-in-machine-learning)! The training error is significantly lower than the testing error, which shows that the model is learning the training data very well but then is not able to generalize to the test data as well. Moveover, as the number of trees increases, the amount of overfitting increases. Both the test and training error decrease as the number of trees increase but the training error decreases more rapidly. There will always be a difference between the training error and testing error (the training error is always lower) but if there is a significant difference, we want to try and reduce overfitting, either by getting more training data or reducing the complexity of the model through hyperparameter tuning or regularization. [For the gradient boosting regressor](https://www.quora.com/How-do-you-correct-for-overfitting-for-a-Gradient-Boosted-Machine), some options include reducing the number of trees, reducing the max depth of each tree, and increasing the minimum number of samples in a leaf node. For anyone who wants to go further into the gradient boosting regressor, [here is a great article.](http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/) For now, we will use the model with the best performance and accept that it may be overfitting to the training set. Based on the cross validation results, the best model using 800 trees and achieves a cross validation error under 9. This indicates that the average cross-validation estimate of the Energy Star Score is within 9 points of the true answer! Evaluate Final Model on the Test SetWe will use the best model from hyperparameter tuning to make predictions on the testing set. Remember, our model has never seen the test set before, so this performance should be a good indicator of how the model would perform if deployed in the real world. For comparison, we can also look at the performance of the default model. The code below creates the final model, trains it (with timing), and evaluates on the test set.
###Code
# Default model
default_model = GradientBoostingRegressor(random_state = 42)
# Select the best model
final_model = grid_search.best_estimator_
final_model
%%timeit -n 1 -r 5
default_model.fit(X, y)
%%timeit -n 1 -r 5
final_model.fit(X, y)
default_pred = default_model.predict(X_test)
final_pred = final_model.predict(X_test)
print('Default model performance on the test set: MAE = %0.4f.' % mae(y_test, default_pred))
print('Final model performance on the test set: MAE = %0.4f.' % mae(y_test, final_pred))
###Output
Default model performance on the test set: MAE = 10.0118.
Final model performance on the test set: MAE = 9.0446.
###Markdown
The final model does out-perform the baseline model by about 10%, but at the cost of significantly increased running time (it's about 12 times slower on my machine). Machine learning is often a field of tradeoffs: bias vs variance, accuracy vs interpretability, accuracy vs running time, and the final decision of which model to use depends on the situation. Here, the increase in run time is not an impediment, because while the relative difference is large, the absolute magnitude of the training time is not significant. In a different situation, the balance might not be the same so we would need to consider what we are optimizing for and the limitations we have to work with. To get a sense of the predictions, we can plot the distribution of true values on the test set and the predicted values on the test set.
###Code
figsize(8, 8)
# Density plot of the final predictions and the test values
sns.kdeplot(final_pred, label = 'Predictions')
sns.kdeplot(y_test, label = 'Values')
# Label the plot
plt.xlabel('Energy Star Score'); plt.ylabel('Density');
plt.title('Test Values and Predictions');
###Output
_____no_output_____
###Markdown
The distribution looks to be nearly the same although the density of the predicted values is closer to the median of the test values rather than to the actual peak at 100. It appears the model might be less accurate at predicting the extreme values and instead predicts values closer to the median. Another diagnostic plot is a histogram of the residuals. Ideally, we would hope that the residuals are normally distributed, meaning that the model is wrong the same amount in both directions (high and low).
###Code
figsize(6, 6)
# Calculate the residuals
residuals = final_pred - y_test
# Plot the residuals in a histogram
plt.hist(residuals, color = 'red', bins = 20,
edgecolor = 'black')
plt.xlabel('Error'); plt.ylabel('Count')
plt.title('Distribution of Residuals');
###Output
_____no_output_____ |
additional_multi_plot_practice.ipynb | ###Markdown
We will continue to work with the fuel economy dataset in this workspace.
###Code
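# Imports assumed from the workspace's earlier setup cells (not shown in this notebook);
# the solution-check helpers (additionalplot_solution_1/2) are also provided by the workspace.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
%matplotlib inline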
fuel_econ = pd.read_csv('./data/fuel_econ.csv')
fuel_econ.head()
###Output
_____no_output_____
###Markdown
**Task 1**: Practice creating a plot matrix by depicting the relationship between five numeric variables in the fuel efficiency dataset: 'displ', 'co2', 'city', 'highway', and 'comb'. Do you see any interesting relationships that weren't highlighted previously?
###Code
fuel_stats = ['displ', 'co2', 'city', 'highway', 'comb']
g = sb.PairGrid(data=fuel_econ, vars=fuel_stats)
g = g.map_offdiag(plt.scatter)
g.map_diag(plt.hist);
# run this cell to check your work against ours
additionalplot_solution_1()
###Output
I set up my PairGrid to plot scatterplots off the diagonal and histograms on the diagonal. The intersections where 'co2' meets the fuel mileage measures are fairly interesting in how tight the curves are. You'll explore this more in the next task.
###Markdown
**Task 2**: The output of the preceding task pointed out a potentially interesting relationship between co2 emissions and overall fuel efficiency. Engineer a new variable that depicts CO2 emissions as a function of gallons of gas (g / gal). (The 'co2' variable is in units of g / mi, and the 'comb' variable is in units of mi / gal.) Then, plot this new emissions variable against engine size ('displ') and fuel type ('fuelType'). For this task, compare not just Premium Gasoline and Regular Gasoline, but also Diesel fuel.
###Code
fuel_econ.fuelType.unique()
# (g / mi) * (mi / gal) = g / gal
fuel_econ['co2_g_gal'] = fuel_econ['co2'] * fuel_econ['comb']
df = fuel_econ.loc[fuel_econ['fuelType'].isin(['Premium Gasoline', 'Regular Gasoline', 'Diesel'])]
g = sb.FacetGrid(data=df, col='fuelType', height=5)
g.map(plt.scatter, 'co2_g_gal', 'displ', alpha=1/10)
g.set_xlabels('CO2 (g/gal)')
g.set_ylabels('Engine displacement (l)')
g.set_titles(col_template='{col_name}');
# run this cell to check your work against ours
additionalplot_solution_2()
###Output
Due to the high number of data points and their heavy overlap, I've chosen to plot the data in a faceted plot. You can see that engine sizes are smaller for cars that use regular gasoline compared to those that use premium gas. Most cars fall in an emissions band a bit below 9 kg CO2 per gallon; diesel cars are consistently higher, a little above 10 kg CO2 per gallon. This makes sense, since a gallon of gas gets burned no matter how efficient the process. More strikingly, there's a smattering of points with much smaller emissions. If you inspect these points more closely you'll see that they represent hybrid cars that use battery energy in addition to conventional fuel! Pulling these out of the dataset mechanically would require information that was trimmed away when this dataset was created, as well as additional research to understand why these points don't fit the normal CO2 bands.
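###Markdown
As a small follow-up, the low-emission points can at least be isolated for a closer look. This is only an illustrative sketch: the 6,000 g/gal cutoff below is an eyeballed threshold, not a value from the original analysis.
###Code
# Hypothetical cutoff chosen by eye from the faceted plot above
low_emitters = df[df['co2_g_gal'] < 6000]
low_emitters[['fuelType', 'displ', 'comb', 'co2_g_gal']].head(10)
###Output
_____no_output_____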
|
distilbert_base_uncased_finetuned_emotion.ipynb | ###Markdown
###Code
!pip install datasets transformers[sentencepiece]
!pip install huggingface
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding
from datasets import load_dataset
dataset = load_dataset("emotion")
dataset
import pandas as pd
import numpy as np
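# Quick look at the class balance of the training labels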
arr = pd.Series(dataset['train']['label'])
arr.value_counts()
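# Build the id <-> label mappings from the dataset's label names; they are passed to the model config below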
label_names = dataset['train'].features['label'].names
label_names
id2label = {k:v for k,v in enumerate(label_names)}
label2id = {v:k for k,v in id2label.items()}
id2label
from transformers import AutoTokenizer, DataCollatorWithPadding
checkpoint = "bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
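# Tokenize with truncation only; padding is applied per batch by the data collator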
def tokenize_function(example):
return tokenizer(example["text"], truncation=True)
tokenized_datasets = dataset.map(tokenize_function, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
tokenized_datasets['train']
tokenized_datasets = tokenized_datasets.remove_columns(['text'])
tokenized_datasets["train"].column_names
tokenized_datasets['train'][0]
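# Load the checkpoint with a fresh sequence-classification head; num_labels is inferred from the id2label mapping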
from transformers import AutoModelForSequenceClassification
model = AutoModelForSequenceClassification.from_pretrained(
checkpoint,
id2label = id2label,
label2id = label2id,
)
model.config.num_labels
from transformers import TrainingArguments
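# Training configuration: evaluate and save once per epoch, and push checkpoints to the Hugging Face Hub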
args = TrainingArguments(
"distilbert-base-uncased-finetuned-emotion",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
per_device_train_batch_size = 64,
per_device_eval_batch_size = 64,
push_to_hub=True,
)
!huggingface-cli login
!git config --global credential.helper store
!apt install git-lfs
!pip install bert_score
from datasets import load_metric
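# Accuracy on the evaluation set, computed from the arg-max of the logits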
def compute_metrics(eval_preds):
metric = load_metric("accuracy")
logits, labels = eval_preds
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
from transformers import Trainer
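# Assemble the Trainer with the tokenized splits, dynamic-padding collator, and accuracy metric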
trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
trainer.train()
trainer.push_to_hub(commit_message="Training complete")
from transformers import pipeline
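# Reload the fine-tuned model from the Hub for a quick sanity check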
mood = pipeline('sentiment-analysis', model='Tahsin/distilbert-base-uncased-finetuned-emotion')
mood("I love my country")
###Output
_____no_output_____ |