markdown | code | output | license | path | repo_name
---|---|---|---|---|---
Comparison with detectron | def load_images(image_path):
    # list the .jpg frames in the directory and sort them into sequence order
    files = os.listdir(image_path)
    files_name = [file_name for file_name in files if file_name.endswith('.jpg')]
    files_name.sort()
    frames = []
    # read every frame and stack them into a single numpy array
    for file_name in files_name:
        frame = cv2.imread(image_path + file_name)
        frames.append(frame)
    frames = np.array(frames)
    return frames
seq_images = load_images(frame_path)
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
# crop positive samples
def crop_objects(img, outputs, image_index, writer):
    classes = outputs["instances"].pred_classes
    boxes = outputs["instances"].pred_boxes.tensor.cpu().numpy()
    # count the person detections (COCO class 0) cropped from this image
    index = -1
    count = 0
    test_images = []
    test_labels = []
    for class_index in classes:
        index += 1
        if class_index == 0:
            # get box coords and crop the detection from the image
            box = boxes[index]
            xmin, ymin, xmax, ymax = box
            cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
            test_images.append(cropped_img)
            test_labels.append(0)
            count += 1
    writer.writerow([image_index + 1, count])
    return writer, test_images, test_labels
# returns an array of resized images and converts to grayscale
def ResizeImages(images, IMAGE_WIDTH, IMAGE_HEIGHT):
ResizedImages = np.zeros((len(images), IMAGE_HEIGHT, IMAGE_WIDTH))
for i in range(len(images)):
img = cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)
ResizedImages[i] = cv2.resize(img, (IMAGE_WIDTH, IMAGE_HEIGHT))
return ResizedImages
def comparative_acc(Optimal_Clf, test_images, test_labels):
# resize and grayscale for HoG
R_test_images = ResizeImages(test_images, IMAGE_WIDTH, IMAGE_HEIGHT)
# print("R_train_images: ", R_train_images.shape)
# create HoG test features
test_features = HoG_features(R_test_images)
# print("trained_features_reshaped: ", test_features.shape)
# print("trained_features_reshaped[0]: ", test_features[0])
# accuracy
accuracy = predict(Optimal_Clf, test_features, test_labels)
# print(f"Gamma: {gamma}, C: {C}, Accuracy: {round(accuracy, 2)}%")
return accuracy
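# HoG_features, predict, Optimal_Clf, IMAGE_WIDTH and IMAGE_HEIGHT come from the
# training part of this notebook, which is not shown in this section. If they are
# missing, the guarded fallbacks below are only a sketch of what they are assumed
# to do (one skimage HoG descriptor per image, and classifier accuracy in percent);
# the actual definitions may differ.
if 'HoG_features' not in globals():
    from skimage.feature import hog
    def HoG_features(images):
        # one HoG descriptor per grayscale, resized image
        return np.array([hog(img, orientations=9, pixels_per_cell=(8, 8),
                             cells_per_block=(2, 2)) for img in images])
if 'predict' not in globals():
    def predict(clf, features, labels):
        # accuracy (in percent) of a fitted classifier on the given features/labels
        return clf.score(features, labels) * 100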
cfile = open(path + 'detectron_count.csv', 'w+', newline='')
accfile = open(path + 'accuracy_comparison.csv', 'w+', newline='')
writer = csv.writer(cfile)
writer.writerow(["id", "Count"])
acc_writer = csv.writer(accfile)
acc_writer.writerow(["id", "accuracy"])
# getting HoG features
IMAGE_WIDTH = 64 # same as train image
IMAGE_HEIGHT = 128
# use optimal parameter for model
gamma = "scale"
C = 10
test_img_set = []
for image_index in range(len(seq_images)):
try:
outputs = predictor(seq_images[image_index])
writer, test_images, test_labels = crop_objects(seq_images[image_index], outputs, image_index, writer)
accuracy = comparative_acc(Optimal_Clf, test_images, test_labels)
acc_writer.writerow([image_index+1, accuracy])
except Exception as e:
print(str(e))
cfile.close()
accfile.close()
v = Visualizer(seq_images[0][:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.0)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(out.get_image()[:, :, ::-1])
import imutils
def pyramid(image, scale=1.5, minSize=(30, 30)):
# yield the original image
yield image
# keep looping over the pyramid
while True:
# compute the new dimensions of the image and resize it
w = int(image.shape[1] / scale)
image = imutils.resize(image, width=w)
# if the resized image does not meet the supplied minimum
# size, then stop constructing the pyramid
if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
break
# yield the next image in the pyramid
yield image
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in range(0, image.shape[0], stepSize):
for x in range(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
def detectron_detect(img):
    frame = np.array(img)
    outputs = predictor(frame)
    v = Visualizer(frame[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.0)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2_imshow(out.get_image()[:, :, ::-1])
    classes = outputs["instances"].pred_classes
    boxes = outputs["instances"].pred_boxes.tensor.cpu().numpy()
    # return the first detected person (COCO class 0) and its bounding box
    index = -1
    for class_index in classes:
        index += 1
        if class_index == 0:
            box = boxes[index]
            return class_index, box
    # no person detected in this window
    return -1, None
(winW, winH) = (64, 128)
def window_predict(clf, image):
    person_count = 0
    SumIOU = 0.0
    # loop over the image pyramid
    for resized in pyramid(image, scale=1.5):
        # loop over the sliding window for each layer of the pyramid
        for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
            # if the window does not meet our desired window size, ignore it
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            # detectron prediction on the current window
            det_label, det_box = detectron_detect(window)
            # the SVM was trained on HoG features, so extract them from the window first
            window_features = HoG_features(ResizeImages([window], winW, winH))
            SVM_prediction = clf.predict(window_features)
            # det_box is expressed in the window's own coordinate frame, so compare it
            # against the full window extent
            window_box = (0, 0, winW, winH)
            # both predict a person
            if SVM_prediction == 1 and det_label == 0:
                person_count += 1
                SumIOU += bb_intersection_over_union(window_box, det_box)
            # only the SVM predicts a person || False Positive
            elif SVM_prediction == 1:
                SumIOU += 0
                person_count += 1
            # only detectron predicts a person || False Negative
            elif det_label == 0:
                SumIOU += 0
                person_count += 1
            # draw the current window for visual inspection
            clone = resized.copy()
            cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
            # cv2_imshow("Window", clone)
            # cv2.waitKey(1)
            # time.sleep(0.025)
    AvgIOU = SumIOU / person_count if person_count else 0.0
    return AvgIOU
def bb_intersection_over_union(BoxA, BoxB):
    A_xmin, A_ymin, A_xmax, A_ymax = BoxA
    B_xmin, B_ymin, B_xmax, B_ymax = BoxB
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(A_xmin, B_xmin)
    yA = max(A_ymin, B_ymin)
    xB = min(A_xmax, B_xmax)
    yB = min(A_ymax, B_ymax)
    # compute the area of the intersection rectangle
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # compute the area of both the prediction and ground-truth rectangles
    boxAArea = (A_xmax - A_xmin + 1) * (A_ymax - A_ymin + 1)
    boxBArea = (B_xmax - B_xmin + 1) * (B_ymax - B_ymin + 1)
    # compute the intersection over union by taking the intersection area and
    # dividing it by the sum of prediction + ground-truth areas minus the
    # intersection area
    iou = interArea / float(boxAArea + boxBArea - interArea)
    # return the intersection over union value
    return iou
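# Sanity check for the IoU helper: identical boxes give 1.0, disjoint boxes give 0.0
assert bb_intersection_over_union((0, 0, 10, 10), (0, 0, 10, 10)) == 1.0
assert bb_intersection_over_union((0, 0, 10, 10), (20, 20, 30, 30)) == 0.0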
def LBP(images, radius):
P = 8 * radius
R = radius
features = []
eps = 1e-7
for img in images:
lbp = feature.local_binary_pattern(img, P, R)
(hist, _) = np.histogram(lbp.ravel(),bins=np.arange(0, P + 3),range=(0, P + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
if np.isnan(hist).any():
print("nan")
features.append(hist)
features=np.array(features)
return features | _____no_output_____ | MIT | Part2.ipynb | ismailfaruk/ECSE415-Final-Project |
Understanding Cross-Entropy Loss**Recall:** a loss function compares the predictions of a model with the correct labels to tell us how well the model is doing, and to help find out how we can update the model's parameters to improve its performance (using gradient descent).**Cross-entropy** is a loss function we can use to train a model when the output is one of several classes. For example, we have 10 classes to choose from when trying to predict which number an image of a single digit represents.*In this image, the number we are trying to predict belongs to the class representing the digit 8.*To use the cross-entropy loss, we need to have as many outputs from our model as the number of possible classes. The cross-entropy loss then enables us to train the model such that the value of the output corresponding to the correct prediction is high, and for the other outputs it is low.The first step of using the cross-entropy loss function is passing the raw outputs of the model through a **softmax layer**. A softmax layer squishes all the outputs of the model between 0 and 1. It also ensures that all these values combined add up to 1. | import torch
t = torch.tensor([[-9, 5, 10]], dtype=torch.double)
torch.softmax(t, dim=1) | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
Mathematically, each of the values above is calculated as $\text{softmax}(x_i) = \frac{e^{x_i}}{\sum_{j} e^{x_j}}$. We can create a function to calculate the softmax on our own as follows: | def softmax(x):
return torch.exp(x) / torch.exp(x).sum()
softmax(t) | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
Each value can be interpreted as the confidence with which the model predicts the corresponding output as the correct class.Since the exponential function is used in the softmax layer, any raw output from the model that is slightly higher than another will be amplified by the softmax layer.*The exponential function amplifies even small differences in input values*This goes to show that the softmax layer tries its best to pick one single value as the correct model output. As a result, this layer works really well when trying to train a classifier that has to pick one correct category.On the other hand, if you want a model not to pick a class just because it has just a slightly higher output value, it is advisable to use the sigmoid function with each individual output.After the softmax layer, the second part of the cross-entropy loss is the **log likelihood**.By using the softmax layer, we have condensed the value of each output between 0 and 1. This greatly reduces the sensitivity of the model confidence. For example, a prediction of 0.999 can be interpreted as being 10 times more confident than a prediction of 0.99. However, on a softmax scale, the difference between the two values is minuscule - a mere 0.009!By taking the log of the values, we can amplify even such small differences.For example,$$\frac{0.999}{0.99} \times 100\% = \text{approx. 0.9\% difference in confidence}$$However,$$\frac{\log{0.999}}{\log{0.99}} \times 100\% = \text{approx. 10\% difference in confidence}$$On a log scale, number close to 0 are pushed towards negative infinity and numbers close to 1 are pushed towards 0.Let us now consider the model output value corresponding to the correct class. If we maximize this value, then all the other values will be automatically minimized (since all values have to add up to 1 because of the softmax layer).If the output of the model corresponding to the correct class is close to 1, its log value will be close to 0. However, if the model output is close to 0, its log value will be highly negative (close to negative infinity).We need the value of the loss function to be high when the prediction is incorrect, and low when the prediction is correct. We can have this with the log values if we drop the negative sign (or equivalently, multiply the value by -1). Then, when the model output is close to 0 for the correct class (incorrect prediction), the negative log value will be extremely high and when the model output is close to 1 for the correct class (correct prediction), the negative log value will be close to 0.We can then use this as a loss function to maximize the output of the model corresponding to the correct class. Like we saw before, this will automatically minimize the outputs of the other classes because of the softmax function.This combination of softmax and log-likelihood is the cross-entropy loss. To see how this all works with PyTorch, let us assume we have 3 data points that can belong to one of 5 classes.Assume our model produces the following output for these 3 data points: | model_output = torch.randn((3, 5))
model_output | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
Let us also assume that the correct classes for these data points are as follows: | targets = torch.tensor([3, 0, 1])
targets | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
We first pass these outputs through a softmax layer: | sm = torch.softmax(model_output, dim = 1)
sm | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
As expected, all values have been squished between 0 and 1.We can also confirm that for each data point, the values sum up to 1: | sm.sum(dim=1) | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
Next, we take the log of these values: | lg = torch.log(sm)
lg | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
We can then use `nll_loss` (i.e. Negative Log Likelihood) that will find the mean of the values corresponding to the correct class. This function will also multiply the values by -1 for us before doing so. | import torch.nn.functional as F
loss = F.nll_loss(lg, targets)
loss | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
We can manually verify this for the 3 data points: | -1 * (lg[0][targets[0]] + lg[1][targets[1]] + lg[2][targets[2]]) / 3 | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
Note that the `nll_loss` function assumes that the log has been taken before the values are passed to the function.PyTorch has a `log_softmax` function that combines softmax with log in one step. We can use that function to achieve the same result as follows: | lsm = F.log_softmax(model_output, dim = 1)
loss = F.nll_loss(lsm, targets)
loss | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
PyTorch also has a cross-entropy loss that can be used directly on raw model outputs: | F.cross_entropy(model_output, targets) | _____no_output_____ | MIT | content/notebooks/2021-07-18-understanding-cross-entropy-loss.ipynb | mashruravi/homepage |
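For completeness, the same loss is also available in module form; `nn.CrossEntropyLoss` wraps the same log-softmax plus negative log likelihood (the module form is not used in the notebook above, so this is just an equivalent alternative):
from torch import nn
criterion = nn.CrossEntropyLoss()
loss = criterion(model_output, targets)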
--- Some websites to open up for class: - [Overleaf](https://v2.overleaf.com/)- [Overleaf Docs and Help](https://v2.overleaf.com/learn)- [Latex Symbols](https://en.wikipedia.org/wiki/Wikipedia:LaTeX_symbols)- [Latex draw symbols](http://detexify.kirelabs.org/classify.html)- [The SAO/NASA Astrophysics Data System](https://ui.adsabs.harvard.edu/classic-form)---- [Latex wikibook](https://en.wikibooks.org/wiki/LaTeX) --- $\LaTeX$ Assignment | -----------------------------------------------------------------------------
LaTeX homework - Create a LaTeX document with references.
-----------------------------------------------------------------------------
Start with the file: FirstLast.tex
Minimum required elements:
* Between 2 and 4 pages in length (pages > 4 will be ignored).
* At least two paragraphs of text (the text should be coherent).
* At least 5 references from ADS.
* http://adsabs.harvard.edu/abstract_service.html
* Make sure to \citep{} or \citet{} the references in your paper
* The equation on the blackboard.
* One (or more) equation(s) of your choice.
* Include the plot you generated last class (MyCoolPlot.png)
* One other plot/image (do not reuse an old one!)
* One table of at least 4 columns and 4 rows.
* Bonus points given for interesting content!
-----------------------------------------------------------------------------
Create a PDF file:
Save the file as FirstLast.pdf (e.g. TobySmith.pdf)
Upload the PDF to the class canvas site
-----------------------------------------------------------------------------
Deadline: Monday Mar 11 - 10pm
----------------------------------------------------------------------------- | _____no_output_____ | MIT | LaTeX_Assignment.ipynb | UWashington-Astro300/Astro300-Wtr19 |
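For reference, a minimal sketch of how the required elements might appear in the body of FirstLast.tex (the citation key `Smith2018` and the bibliography file `references.bib` are placeholders, not part of the assignment):
% cite ADS references with natbib
As shown by \citet{Smith2018}, ... \citep{Smith2018}
% an equation of your choice
\begin{equation}
E = mc^{2}
\end{equation}
% the plot generated last class
\includegraphics[width=0.8\textwidth]{MyCoolPlot.png}
% bibliography built from the ADS BibTeX entries
\bibliography{references}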
`pandas` can output $\LaTeX$ tables | import pandas as pd
my_table = pd.read_csv('./Data/Zodiac.csv')
my_table[0:3]
print(my_table.to_latex(index=False)) | _____no_output_____ | MIT | LaTeX_Assignment.ipynb | UWashington-Astro300/Astro300-Wtr19 |
Read the CSV and Perform Basic Data Cleaning | df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
df = df[df["koi_disposition"] != 'CANDIDATE']
df["koi_disposition"].value_counts() | _____no_output_____ | ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
Select your features (columns) | # Set features. This will also be used as your x values.
selected_features = df[['koi_period', 'koi_impact', 'koi_duration', 'koi_depth', 'koi_prad', 'koi_teq', 'koi_insol', 'koi_steff', 'koi_slogg', 'koi_srad']]
selected_features.koi_period.astype(float) | _____no_output_____ | ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
Create a Train Test Split (use `koi_disposition` for the y values) | # map the disposition label to 0/1; take it from df, since selected_features holds only the numeric feature columns
y = df['koi_disposition'].map({'FALSE POSITIVE': 0, 'CONFIRMED': 1})
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(selected_features, y, random_state=0)
y_train.head() | _____no_output_____ | ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
Pre-processing: scale the data using the MinMaxScaler and perform some feature selection | # Scale your data
from sklearn.preprocessing import MinMaxScaler
minmax = MinMaxScaler()
minmax_fitted = minmax.fit(X_train)
X_trains = minmax_fitted.transform(X_train)
X_tests = minmax_fitted.transform(X_test)
# y_trains = minmax.fit_transform(y_train)
# y_tests = minmax.fit_transform(y_test) | _____no_output_____ | ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
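The heading above mentions feature selection, but none is applied in that cell. A minimal sketch of one way to do it on the scaled data (SelectKBest with an ANOVA F-test is an assumption; the notebook does not specify a method):
# hedged sketch: keep the k features with the highest ANOVA F-scores
from sklearn.feature_selection import SelectKBest, f_classif
selector = SelectKBest(score_func=f_classif, k=5)
X_trains_sel = selector.fit_transform(X_trains, y_train)
X_tests_sel = selector.transform(X_tests)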
Train the Model Not using scaled | from sklearn.cluster import KMeans
from sklearn import metrics
model1 = KMeans(n_clusters=3)
model1.fit(X_train)
# Predict the clusters
pred = model1.predict(X_test)
pred_train = model1.predict(X_train)
# testing score
score = metrics.f1_score(y_test, pred, pos_label=list(set(y_test)), average=None)
# training score
score_train = metrics.f1_score(y_train, pred_train, pos_label=list(set(y_train)), average=None)
print(f"Training Data Score: {score_train[0]}")
print("-----------------------------------------------------")
print(f"Testing Data Score: {score[0]}") | Training Data Score: 0.7941800545619885
-----------------------------------------------------
Testing Data Score: 0.7972789115646258
| ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
Using scaled | kmeansS = KMeans(n_clusters=3)
kmeansS.fit(X_trains)
# Predict the clusters
preds = kmeansS.predict(X_tests)
pred_trains = kmeansS.predict(X_trains)
# testing score
scores = metrics.f1_score(y_test, preds, pos_label=list(set(y_test)), average=None)
# training score
score_trains = metrics.f1_score(y_train, pred_trains, pos_label=list(set(y_train)), average=None)
print(f"Testing score: {kmeansS.score(X_tests, y_test)}")
print(f"Training score: {kmeansS.score(X_trains, y_train)}")
print(scores)
print(score_trains) | Testing score: -29.14663974446135
Training score: -87.19826994353079
[0.21327968 0.5829904 0. ]
[0.23431242 0.59401416 0. ]
| ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
Save the Model | # save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'megan_okerlund_model1.sav'
joblib.dump(model1, filename) | _____no_output_____ | ADSL | Mokerlund's_solution/Ipynb_and_code/model_1.ipynb | mokerlund/machine_learning_classification_challenge |
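The saved model can be restored later with joblib, for example:
loaded_model = joblib.load('megan_okerlund_model1.sav')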
Transfer LearningIn this notebook, you'll learn how to use pre-trained networks to solved challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html). ImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks using an architecture called convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU).Once trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy.With `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now. | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models | _____no_output_____ | MIT | intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb | AdelNamani/deep-learning-v2-pytorch |
Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`. | ! curl -O "https://s3.amazonaws.com/content.udacity-data.com/nd089/Cat_Dog_data.zip"
! unzip Cat_Dog_data.zip
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64) | _____no_output_____ | MIT | intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb | AdelNamani/deep-learning-v2-pytorch |
We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.htmlid5). Let's print out the model architecture so we can see what's going on. | model = models.densenet121(pretrained=True)
model | _____no_output_____ | MIT | intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb | AdelNamani/deep-learning-v2-pytorch |
This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers. | # Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(1024, 500)),
('relu', nn.ReLU()),
('fc2', nn.Linear(500, 2)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier | _____no_output_____ | MIT | intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb | AdelNamani/deep-learning-v2-pytorch |
With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time.PyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backwards passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')` which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU. | import time
for device in ['cpu', 'cuda']:
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to(device)
for ii, (inputs, labels) in enumerate(trainloader):
# Move input and label tensors to the GPU
inputs, labels = inputs.to(device), labels.to(device)
start = time.time()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if ii==3:
break
print(f"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds") | Device = cpu; Time per batch: 4.035 seconds
Device = cuda; Time per batch: 0.008 seconds
| MIT | intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb | AdelNamani/deep-learning-v2-pytorch |
You can write device agnostic code which will automatically use CUDA if it's enabled like so:

```python
# at beginning of the script
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

...

# then whenever you get a new Tensor or Module
# this won't copy if they are already on the desired device
input = data.to(device)
model = MyModule(...).to(device)
```

From here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily.

>**Exercise:** Train a pretrained model to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, it's also a good model to try out first. Make sure you are only training the classifier and the parameters for the features part are frozen. | ## TODO: Use a pretrained model to classify the cat and dog images
sum([p.numel() for p in model.parameters()])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = models.densenet121(pretrained=True)
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
model.classifier = nn.Sequential(nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(256, 2),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.003)
model.to(device)
epochs = 1
steps = 0
running_loss = 0
print_every = 5
train_losses, test_losses = [], []
for e in range(epochs):
running_loss = 0
model.train()
for images, labels in trainloader:
steps += 1
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
log_ps = model(images)
loss = criterion(log_ps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss = 0
accuracy = 0
model.eval()
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
log_ps = model(images)
test_loss += criterion(log_ps, labels)
ps = torch.exp(log_ps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor))
train_losses.append(running_loss/print_every)
test_losses.append(test_loss/len(testloader))
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
"Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
# reset the running loss and switch back to training mode after evaluating
running_loss = 0
model.train()
| _____no_output_____ | MIT | intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb | AdelNamani/deep-learning-v2-pytorch |
Declare data | edges = pd.read_csv('../../../assets/energy.csv')
edges.head(5) | _____no_output_____ | BSD-3-Clause | examples/gallery/demos/bokeh/energy_sankey.ipynb | jsignell/holoviews |
Plot | hv.Sankey(edges).options(label_position='left') | _____no_output_____ | BSD-3-Clause | examples/gallery/demos/bokeh/energy_sankey.ipynb | jsignell/holoviews |
**Chapter 12 – Custom Models and Training with TensorFlow** _This notebook contains all the sample code in chapter 12._ Setup First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0. | # Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.4 is required in this notebook
# Earlier 2.x versions will mostly work the same, but with a few bugs
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.4"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Tensors and operations Tensors | tf.constant([[1., 2., 3.], [4., 5., 6.]]) # matrix
tf.constant(42) # scalar
t = tf.constant([[1., 2., 3.], [4., 5., 6.]])
t
t.shape
t.dtype | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Indexing | t[:, 1:]
t[..., 1, tf.newaxis] | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Ops | t + 10
tf.square(t)
t @ tf.transpose(t) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Using `keras.backend` | from tensorflow import keras
K = keras.backend
K.square(K.transpose(t)) + 10 | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
From/To NumPy | a = np.array([2., 4., 5.])
tf.constant(a)
t.numpy()
np.array(t)
tf.square(a)
np.square(t) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Conflicting Types | try:
tf.constant(2.0) + tf.constant(40)
except tf.errors.InvalidArgumentError as ex:
print(ex)
try:
tf.constant(2.0) + tf.constant(40., dtype=tf.float64)
except tf.errors.InvalidArgumentError as ex:
print(ex)
t2 = tf.constant(40., dtype=tf.float64)
tf.constant(2.0) + tf.cast(t2, tf.float32) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Strings | tf.constant(b"hello world")
tf.constant("café")
u = tf.constant([ord(c) for c in "café"])
u
b = tf.strings.unicode_encode(u, "UTF-8")
tf.strings.length(b, unit="UTF8_CHAR")
tf.strings.unicode_decode(b, "UTF-8") | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
String arrays | p = tf.constant(["Café", "Coffee", "caffè", "咖啡"])
tf.strings.length(p, unit="UTF8_CHAR")
r = tf.strings.unicode_decode(p, "UTF8")
r
print(r) | <tf.RaggedTensor [[67, 97, 102, 233], [67, 111, 102, 102, 101, 101], [99, 97, 102, 102, 232], [21654, 21857]]>
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Ragged tensors | print(r[1])
print(r[1:3])
r2 = tf.ragged.constant([[65, 66], [], [67]])
print(tf.concat([r, r2], axis=0))
r3 = tf.ragged.constant([[68, 69, 70], [71], [], [72, 73]])
print(tf.concat([r, r3], axis=1))
tf.strings.unicode_encode(r3, "UTF-8")
r.to_tensor() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Sparse tensors | s = tf.SparseTensor(indices=[[0, 1], [1, 0], [2, 3]],
values=[1., 2., 3.],
dense_shape=[3, 4])
print(s)
tf.sparse.to_dense(s)
s2 = s * 2.0
try:
s3 = s + 1.
except TypeError as ex:
print(ex)
s4 = tf.constant([[10., 20.], [30., 40.], [50., 60.], [70., 80.]])
tf.sparse.sparse_dense_matmul(s, s4)
s5 = tf.SparseTensor(indices=[[0, 2], [0, 1]],
values=[1., 2.],
dense_shape=[3, 4])
print(s5)
try:
tf.sparse.to_dense(s5)
except tf.errors.InvalidArgumentError as ex:
print(ex)
s6 = tf.sparse.reorder(s5)
tf.sparse.to_dense(s6) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Sets | set1 = tf.constant([[2, 3, 5, 7], [7, 9, 0, 0]])
set2 = tf.constant([[4, 5, 6], [9, 10, 0]])
tf.sparse.to_dense(tf.sets.union(set1, set2))
tf.sparse.to_dense(tf.sets.difference(set1, set2))
tf.sparse.to_dense(tf.sets.intersection(set1, set2)) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Variables | v = tf.Variable([[1., 2., 3.], [4., 5., 6.]])
v.assign(2 * v)
v[0, 1].assign(42)
v[:, 2].assign([0., 1.])
try:
v[1] = [7., 8., 9.]
except TypeError as ex:
print(ex)
v.scatter_nd_update(indices=[[0, 0], [1, 2]],
updates=[100., 200.])
sparse_delta = tf.IndexedSlices(values=[[1., 2., 3.], [4., 5., 6.]],
indices=[1, 0])
v.scatter_update(sparse_delta) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Tensor Arrays | array = tf.TensorArray(dtype=tf.float32, size=3)
array = array.write(0, tf.constant([1., 2.]))
array = array.write(1, tf.constant([3., 10.]))
array = array.write(2, tf.constant([5., 7.]))
array.read(1)
array.stack()
mean, variance = tf.nn.moments(array.stack(), axes=0)
mean
variance | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Custom loss function Let's start by loading and preparing the California housing dataset. We first load it, then split it into a training set, a validation set and a test set, and finally we scale it: | from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(
housing.data, housing.target.reshape(-1, 1), random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
X_test_scaled = scaler.transform(X_test)
def huber_fn(y_true, y_pred):
error = y_true - y_pred
is_small_error = tf.abs(error) < 1
squared_loss = tf.square(error) / 2
linear_loss = tf.abs(error) - 0.5
return tf.where(is_small_error, squared_loss, linear_loss)
plt.figure(figsize=(8, 3.5))
z = np.linspace(-4, 4, 200)
plt.plot(z, huber_fn(0, z), "b-", linewidth=2, label="huber($z$)")
plt.plot(z, z**2 / 2, "b:", linewidth=1, label=r"$\frac{1}{2}z^2$")
plt.plot([-1, -1], [0, huber_fn(0., -1.)], "r--")
plt.plot([1, 1], [0, huber_fn(0., 1.)], "r--")
plt.gca().axhline(y=0, color='k')
plt.gca().axvline(x=0, color='k')
plt.axis([-4, 4, 0, 4])
plt.grid(True)
plt.xlabel("$z$")
plt.legend(fontsize=14)
plt.title("Huber loss", fontsize=14)
plt.show()
input_shape = X_train.shape[1:]
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1),
])
model.compile(loss=huber_fn, optimizer="nadam", metrics=["mae"])
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid)) | Epoch 1/2
363/363 [==============================] - 1s 2ms/step - loss: 1.0443 - mae: 1.4660 - val_loss: 0.2862 - val_mae: 0.5866
Epoch 2/2
363/363 [==============================] - 0s 737us/step - loss: 0.2379 - mae: 0.5407 - val_loss: 0.2382 - val_mae: 0.5281
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Saving/Loading Models with Custom Objects | model.save("my_model_with_a_custom_loss.h5")
model = keras.models.load_model("my_model_with_a_custom_loss.h5",
custom_objects={"huber_fn": huber_fn})
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
def create_huber(threshold=1.0):
def huber_fn(y_true, y_pred):
error = y_true - y_pred
is_small_error = tf.abs(error) < threshold
squared_loss = tf.square(error) / 2
linear_loss = threshold * tf.abs(error) - threshold**2 / 2
return tf.where(is_small_error, squared_loss, linear_loss)
return huber_fn
model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=["mae"])
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.save("my_model_with_a_custom_loss_threshold_2.h5")
model = keras.models.load_model("my_model_with_a_custom_loss_threshold_2.h5",
custom_objects={"huber_fn": create_huber(2.0)})
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
class HuberLoss(keras.losses.Loss):
def __init__(self, threshold=1.0, **kwargs):
self.threshold = threshold
super().__init__(**kwargs)
def call(self, y_true, y_pred):
error = y_true - y_pred
is_small_error = tf.abs(error) < self.threshold
squared_loss = tf.square(error) / 2
linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2
return tf.where(is_small_error, squared_loss, linear_loss)
def get_config(self):
base_config = super().get_config()
return {**base_config, "threshold": self.threshold}
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1),
])
model.compile(loss=HuberLoss(2.), optimizer="nadam", metrics=["mae"])
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.save("my_model_with_a_custom_loss_class.h5")
model = keras.models.load_model("my_model_with_a_custom_loss_class.h5",
custom_objects={"HuberLoss": HuberLoss})
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.loss.threshold | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Other Custom Functions | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
def my_softplus(z): # return value is just tf.nn.softplus(z)
return tf.math.log(tf.exp(z) + 1.0)
def my_glorot_initializer(shape, dtype=tf.float32):
stddev = tf.sqrt(2. / (shape[0] + shape[1]))
return tf.random.normal(shape, stddev=stddev, dtype=dtype)
def my_l1_regularizer(weights):
return tf.reduce_sum(tf.abs(0.01 * weights))
def my_positive_weights(weights): # return value is just tf.nn.relu(weights)
return tf.where(weights < 0., tf.zeros_like(weights), weights)
layer = keras.layers.Dense(1, activation=my_softplus,
kernel_initializer=my_glorot_initializer,
kernel_regularizer=my_l1_regularizer,
kernel_constraint=my_positive_weights)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1, activation=my_softplus,
kernel_regularizer=my_l1_regularizer,
kernel_constraint=my_positive_weights,
kernel_initializer=my_glorot_initializer),
])
model.compile(loss="mse", optimizer="nadam", metrics=["mae"])
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.save("my_model_with_many_custom_parts.h5")
model = keras.models.load_model(
"my_model_with_many_custom_parts.h5",
custom_objects={
"my_l1_regularizer": my_l1_regularizer,
"my_positive_weights": my_positive_weights,
"my_glorot_initializer": my_glorot_initializer,
"my_softplus": my_softplus,
})
class MyL1Regularizer(keras.regularizers.Regularizer):
def __init__(self, factor):
self.factor = factor
def __call__(self, weights):
return tf.reduce_sum(tf.abs(self.factor * weights))
def get_config(self):
return {"factor": self.factor}
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1, activation=my_softplus,
kernel_regularizer=MyL1Regularizer(0.01),
kernel_constraint=my_positive_weights,
kernel_initializer=my_glorot_initializer),
])
model.compile(loss="mse", optimizer="nadam", metrics=["mae"])
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.save("my_model_with_many_custom_parts.h5")
model = keras.models.load_model(
"my_model_with_many_custom_parts.h5",
custom_objects={
"MyL1Regularizer": MyL1Regularizer,
"my_positive_weights": my_positive_weights,
"my_glorot_initializer": my_glorot_initializer,
"my_softplus": my_softplus,
}) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Custom Metrics | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1),
])
model.compile(loss="mse", optimizer="nadam", metrics=[create_huber(2.0)])
model.fit(X_train_scaled, y_train, epochs=2) | Epoch 1/2
363/363 [==============================] - 1s 572us/step - loss: 3.5903 - huber_fn: 1.5558
Epoch 2/2
363/363 [==============================] - 0s 552us/step - loss: 0.8054 - huber_fn: 0.3095
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
**Note**: if you use the same function as the loss and a metric, you may be surprised to see different results. This is generally just due to floating point precision errors: even though the mathematical equations are equivalent, the operations are not run in the same order, which can lead to small differences. Moreover, when using sample weights, there's more than just precision errors:* the loss since the start of the epoch is the mean of all batch losses seen so far. Each batch loss is the sum of the weighted instance losses divided by the _batch size_ (not the sum of weights, so the batch loss is _not_ the weighted mean of the losses).* the metric since the start of the epoch is equal to the sum of weighted instance losses divided by sum of all weights seen so far. In other words, it is the weighted mean of all the instance losses. Not the same thing.If you do the math, you will find that loss = metric * mean of sample weights (plus some floating point precision error). | model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=[create_huber(2.0)])
sample_weight = np.random.rand(len(y_train))
history = model.fit(X_train_scaled, y_train, epochs=2, sample_weight=sample_weight)
history.history["loss"][0], history.history["huber_fn"][0] * sample_weight.mean() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Streaming metrics | precision = keras.metrics.Precision()
precision([0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1])
precision([0, 1, 0, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0, 0, 0])
precision.result()
precision.variables
precision.reset_states() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Creating a streaming metric: | class HuberMetric(keras.metrics.Metric):
def __init__(self, threshold=1.0, **kwargs):
super().__init__(**kwargs) # handles base args (e.g., dtype)
self.threshold = threshold
self.huber_fn = create_huber(threshold)
self.total = self.add_weight("total", initializer="zeros")
self.count = self.add_weight("count", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
metric = self.huber_fn(y_true, y_pred)
self.total.assign_add(tf.reduce_sum(metric))
self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
def result(self):
return self.total / self.count
def get_config(self):
base_config = super().get_config()
return {**base_config, "threshold": self.threshold}
m = HuberMetric(2.)
# total = 2 * |10 - 2| - 2²/2 = 14
# count = 1
# result = 14 / 1 = 14
m(tf.constant([[2.]]), tf.constant([[10.]]))
# total = total + (|1 - 0|² / 2) + (2 * |9.25 - 5| - 2² / 2) = 14 + 7 = 21
# count = count + 2 = 3
# result = total / count = 21 / 3 = 7
m(tf.constant([[0.], [5.]]), tf.constant([[1.], [9.25]]))
m.result()
m.variables
m.reset_states()
m.variables | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Let's check that the `HuberMetric` class works well: | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1),
])
model.compile(loss=create_huber(2.0), optimizer="nadam", metrics=[HuberMetric(2.0)])
model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)
model.save("my_model_with_a_custom_metric.h5")
model = keras.models.load_model("my_model_with_a_custom_metric.h5",
custom_objects={"huber_fn": create_huber(2.0),
"HuberMetric": HuberMetric})
model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2) | Epoch 1/2
363/363 [==============================] - 0s 545us/step - loss: 0.2350 - huber_metric: 0.2350
Epoch 2/2
363/363 [==============================] - 0s 524us/step - loss: 0.2278 - huber_metric: 0.2278
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
**Warning**: In TF 2.2, tf.keras adds an extra first metric in `model.metrics` at position 0 (see [TF issue 38150](https://github.com/tensorflow/tensorflow/issues/38150)). This forces us to use `model.metrics[-1]` rather than `model.metrics[0]` to access the `HuberMetric`. | model.metrics[-1].threshold | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Looks like it works fine! More simply, we could have created the class like this: | class HuberMetric(keras.metrics.Mean):
def __init__(self, threshold=1.0, name='HuberMetric', dtype=None):
self.threshold = threshold
self.huber_fn = create_huber(threshold)
super().__init__(name=name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
metric = self.huber_fn(y_true, y_pred)
super(HuberMetric, self).update_state(metric, sample_weight)
def get_config(self):
base_config = super().get_config()
return {**base_config, "threshold": self.threshold} | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
This class handles shapes better, and it also supports sample weights. | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="selu", kernel_initializer="lecun_normal",
input_shape=input_shape),
keras.layers.Dense(1),
])
model.compile(loss=keras.losses.Huber(2.0), optimizer="nadam", weighted_metrics=[HuberMetric(2.0)])
sample_weight = np.random.rand(len(y_train))
history = model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32),
epochs=2, sample_weight=sample_weight)
history.history["loss"][0], history.history["HuberMetric"][0] * sample_weight.mean()
model.save("my_model_with_a_custom_metric_v2.h5")
model = keras.models.load_model("my_model_with_a_custom_metric_v2.h5",
custom_objects={"HuberMetric": HuberMetric})
model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)
model.metrics[-1].threshold | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Custom Layers | exponential_layer = keras.layers.Lambda(lambda x: tf.exp(x))
exponential_layer([-1., 0., 1.]) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Adding an exponential layer at the output of a regression model can be useful if the values to predict are positive and with very different scales (e.g., 0.001, 10., 10000): | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=input_shape),
keras.layers.Dense(1),
exponential_layer
])
model.compile(loss="mse", optimizer="sgd")
model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
class MyDense(keras.layers.Layer):
def __init__(self, units, activation=None, **kwargs):
super().__init__(**kwargs)
self.units = units
self.activation = keras.activations.get(activation)
def build(self, batch_input_shape):
self.kernel = self.add_weight(
name="kernel", shape=[batch_input_shape[-1], self.units],
initializer="glorot_normal")
self.bias = self.add_weight(
name="bias", shape=[self.units], initializer="zeros")
super().build(batch_input_shape) # must be at the end
def call(self, X):
return self.activation(X @ self.kernel + self.bias)
def compute_output_shape(self, batch_input_shape):
return tf.TensorShape(batch_input_shape.as_list()[:-1] + [self.units])
def get_config(self):
base_config = super().get_config()
return {**base_config, "units": self.units,
"activation": keras.activations.serialize(self.activation)}
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
MyDense(30, activation="relu", input_shape=input_shape),
MyDense(1)
])
model.compile(loss="mse", optimizer="nadam")
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.save("my_model_with_a_custom_layer.h5")
model = keras.models.load_model("my_model_with_a_custom_layer.h5",
custom_objects={"MyDense": MyDense})
class MyMultiLayer(keras.layers.Layer):
def call(self, X):
X1, X2 = X
print("X1.shape: ", X1.shape ," X2.shape: ", X2.shape) # Debugging of custom layer
return X1 + X2, X1 * X2
def compute_output_shape(self, batch_input_shape):
batch_input_shape1, batch_input_shape2 = batch_input_shape
return [batch_input_shape1, batch_input_shape2] | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Our custom layer can be called using the functional API like this: | inputs1 = keras.layers.Input(shape=[2])
inputs2 = keras.layers.Input(shape=[2])
outputs1, outputs2 = MyMultiLayer()((inputs1, inputs2)) | X1.shape: (None, 2) X2.shape: (None, 2)
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Note that the `call()` method receives symbolic inputs, whose shape is only partially specified (at this stage, we don't know the batch size, which is why the first dimension is `None`):We can also pass actual data to the custom layer. To test this, let's split each dataset's inputs into two parts, with four features each: | def split_data(data):
columns_count = data.shape[-1]
half = columns_count // 2
return data[:, :half], data[:, half:]
X_train_scaled_A, X_train_scaled_B = split_data(X_train_scaled)
X_valid_scaled_A, X_valid_scaled_B = split_data(X_valid_scaled)
X_test_scaled_A, X_test_scaled_B = split_data(X_test_scaled)
# Printing the splitted data shapes
X_train_scaled_A.shape, X_train_scaled_B.shape | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Now notice that the shapes are fully specified: | outputs1, outputs2 = MyMultiLayer()((X_train_scaled_A, X_train_scaled_B)) | X1.shape: (11610, 4) X2.shape: (11610, 4)
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Let's build a more complete model using the functional API (this is just a toy example, don't expect awesome performance): | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=X_train_scaled_A.shape[-1])
input_B = keras.layers.Input(shape=X_train_scaled_B.shape[-1])
hidden_A, hidden_B = MyMultiLayer()((input_A, input_B))
hidden_A = keras.layers.Dense(30, activation='selu')(hidden_A)
hidden_B = keras.layers.Dense(30, activation='selu')(hidden_B)
concat = keras.layers.Concatenate()((hidden_A, hidden_B))
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])
model.compile(loss='mse', optimizer='nadam')
model.fit((X_train_scaled_A, X_train_scaled_B), y_train, epochs=2,
validation_data=((X_valid_scaled_A, X_valid_scaled_B), y_valid)) | Epoch 1/2
X1.shape: (None, 4) X2.shape: (None, 4)
X1.shape: (None, 4) X2.shape: (None, 4)
356/363 [============================>.] - ETA: 0s - loss: 3.6305X1.shape: (None, 4) X2.shape: (None, 4)
363/363 [==============================] - 1s 1ms/step - loss: 3.5973 - val_loss: 1.3630
Epoch 2/2
363/363 [==============================] - 0s 1ms/step - loss: 1.0132 - val_loss: 0.9773
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Now let's create a layer with a different behavior during training and testing: | class AddGaussianNoise(keras.layers.Layer):
def __init__(self, stddev, **kwargs):
super().__init__(**kwargs)
self.stddev = stddev
def call(self, X, training=None):
if training:
noise = tf.random.normal(tf.shape(X), stddev=self.stddev)
return X + noise
else:
return X
def compute_output_shape(self, batch_input_shape):
return batch_input_shape | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Here's a simple model that uses this custom layer: | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
AddGaussianNoise(stddev=1.0),
keras.layers.Dense(30, activation="selu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer="nadam")
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test) | Epoch 1/2
363/363 [==============================] - 1s 892us/step - loss: 3.7869 - val_loss: 7.6082
Epoch 2/2
363/363 [==============================] - 0s 685us/step - loss: 1.2375 - val_loss: 4.4597
162/162 [==============================] - 0s 416us/step - loss: 0.7560
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Custom Models | X_new_scaled = X_test_scaled
class ResidualBlock(keras.layers.Layer):
def __init__(self, n_layers, n_neurons, **kwargs):
super().__init__(**kwargs)
self.hidden = [keras.layers.Dense(n_neurons, activation="elu",
kernel_initializer="he_normal")
for _ in range(n_layers)]
def call(self, inputs):
Z = inputs
for layer in self.hidden:
Z = layer(Z)
return inputs + Z
class ResidualRegressor(keras.models.Model):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.hidden1 = keras.layers.Dense(30, activation="elu",
kernel_initializer="he_normal")
self.block1 = ResidualBlock(2, 30)
self.block2 = ResidualBlock(2, 30)
self.out = keras.layers.Dense(output_dim)
def call(self, inputs):
Z = self.hidden1(inputs)
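        # reuse block1 four times (the 1 + 3 below), then pass through block2 once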
for _ in range(1 + 3):
Z = self.block1(Z)
Z = self.block2(Z)
return self.out(Z)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = ResidualRegressor(1)
model.compile(loss="mse", optimizer="nadam")
history = model.fit(X_train_scaled, y_train, epochs=5)
score = model.evaluate(X_test_scaled, y_test)
y_pred = model.predict(X_new_scaled)
model.save("my_custom_model.ckpt")
model = keras.models.load_model("my_custom_model.ckpt")
history = model.fit(X_train_scaled, y_train, epochs=5) | Epoch 1/5
363/363 [==============================] - 1s 851us/step - loss: 0.9476
Epoch 2/5
363/363 [==============================] - 0s 736us/step - loss: 0.6998
Epoch 3/5
363/363 [==============================] - 0s 737us/step - loss: 0.4668
Epoch 4/5
363/363 [==============================] - 0s 758us/step - loss: 0.4818
Epoch 5/5
363/363 [==============================] - 0s 756us/step - loss: 0.4591
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
We could have defined the model using the sequential API instead: | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
block1 = ResidualBlock(2, 30)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="elu", kernel_initializer="he_normal"),
block1, block1, block1, block1,
ResidualBlock(2, 30),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer="nadam")
history = model.fit(X_train_scaled, y_train, epochs=5)
score = model.evaluate(X_test_scaled, y_test)
y_pred = model.predict(X_new_scaled) | Epoch 1/5
363/363 [==============================] - 1s 709us/step - loss: 1.5508
Epoch 2/5
363/363 [==============================] - 0s 645us/step - loss: 0.5562
Epoch 3/5
363/363 [==============================] - 0s 625us/step - loss: 0.6406
Epoch 4/5
363/363 [==============================] - 0s 636us/step - loss: 0.3759
Epoch 5/5
363/363 [==============================] - 0s 623us/step - loss: 0.3875
162/162 [==============================] - 0s 463us/step - loss: 0.4852
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Losses and Metrics Based on Model Internals **Note**: the following code has two differences from the code in the book: 1. It creates a `keras.metrics.Mean()` metric in the constructor and uses it in the `call()` method to track the mean reconstruction loss. Since we only want to do this during training, we add a `training` argument to the `call()` method, and if `training` is `True`, then we update `reconstruction_mean` and we call `self.add_metric()` to ensure it's displayed properly. 2. Due to an issue introduced in TF 2.2 ([#46858](https://github.com/tensorflow/tensorflow/issues/46858)), we must not call `super().build()` inside the `build()` method. | class ReconstructingRegressor(keras.Model):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.hidden = [keras.layers.Dense(30, activation="selu",
kernel_initializer="lecun_normal")
for _ in range(5)]
self.out = keras.layers.Dense(output_dim)
self.reconstruction_mean = keras.metrics.Mean(name="reconstruction_error")
def build(self, batch_input_shape):
n_inputs = batch_input_shape[-1]
self.reconstruct = keras.layers.Dense(n_inputs)
#super().build(batch_input_shape)
def call(self, inputs, training=None):
Z = inputs
for layer in self.hidden:
Z = layer(Z)
reconstruction = self.reconstruct(Z)
recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))
self.add_loss(0.05 * recon_loss)
if training:
result = self.reconstruction_mean(recon_loss)
self.add_metric(result)
return self.out(Z)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = ReconstructingRegressor(1)
model.compile(loss="mse", optimizer="nadam")
history = model.fit(X_train_scaled, y_train, epochs=2)
y_pred = model.predict(X_test_scaled) | Epoch 1/2
363/363 [==============================] - 1s 810us/step - loss: 1.6313 - reconstruction_error: 1.0474
Epoch 2/2
363/363 [==============================] - 0s 683us/step - loss: 0.4536 - reconstruction_error: 0.4022
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Computing Gradients with Autodiff | def f(w1, w2):
return 3 * w1 ** 2 + 2 * w1 * w2
w1, w2 = 5, 3
eps = 1e-6
(f(w1 + eps, w2) - f(w1, w2)) / eps
(f(w1, w2 + eps) - f(w1, w2)) / eps
w1, w2 = tf.Variable(5.), tf.Variable(3.)
with tf.GradientTape() as tape:
z = f(w1, w2)
gradients = tape.gradient(z, [w1, w2])
gradients
with tf.GradientTape() as tape:
z = f(w1, w2)
dz_dw1 = tape.gradient(z, w1)
try:
dz_dw2 = tape.gradient(z, w2)
except RuntimeError as ex:
print(ex)
with tf.GradientTape(persistent=True) as tape:
z = f(w1, w2)
dz_dw1 = tape.gradient(z, w1)
dz_dw2 = tape.gradient(z, w2) # works now!
del tape
dz_dw1, dz_dw2
c1, c2 = tf.constant(5.), tf.constant(3.)
with tf.GradientTape() as tape:
z = f(c1, c2)
gradients = tape.gradient(z, [c1, c2])
gradients
with tf.GradientTape() as tape:
tape.watch(c1)
tape.watch(c2)
z = f(c1, c2)
gradients = tape.gradient(z, [c1, c2])
gradients
with tf.GradientTape() as tape:
z1 = f(w1, w2 + 2.)
z2 = f(w1, w2 + 5.)
z3 = f(w1, w2 + 7.)
tape.gradient([z1, z2, z3], [w1, w2])
with tf.GradientTape(persistent=True) as tape:
z1 = f(w1, w2 + 2.)
z2 = f(w1, w2 + 5.)
z3 = f(w1, w2 + 7.)
tf.reduce_sum(tf.stack([tape.gradient(z, [w1, w2]) for z in (z1, z2, z3)]), axis=0)
del tape
with tf.GradientTape(persistent=True) as hessian_tape:
with tf.GradientTape() as jacobian_tape:
z = f(w1, w2)
jacobians = jacobian_tape.gradient(z, [w1, w2])
hessians = [hessian_tape.gradient(jacobian, [w1, w2])
for jacobian in jacobians]
del hessian_tape
jacobians
hessians
def f(w1, w2):
return 3 * w1 ** 2 + tf.stop_gradient(2 * w1 * w2)
with tf.GradientTape() as tape:
z = f(w1, w2)
tape.gradient(z, [w1, w2])
x = tf.Variable(100.)
with tf.GradientTape() as tape:
z = my_softplus(x)
tape.gradient(z, [x])
tf.math.log(tf.exp(tf.constant(30., dtype=tf.float32)) + 1.)
x = tf.Variable([100.])
with tf.GradientTape() as tape:
z = my_softplus(x)
tape.gradient(z, [x])
@tf.custom_gradient
def my_better_softplus(z):
exp = tf.exp(z)
def my_softplus_gradients(grad):
return grad / (1 + 1 / exp)
return tf.math.log(exp + 1), my_softplus_gradients
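# Note: the plain tf.where version defined next reuses the name my_better_softplus,
# shadowing the @tf.custom_gradient variant above; only the tf.where version is used below.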
def my_better_softplus(z):
return tf.where(z > 30., z, tf.math.log(tf.exp(z) + 1.))
x = tf.Variable([1000.])
with tf.GradientTape() as tape:
z = my_better_softplus(x)
z, tape.gradient(z, [x]) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Custom Training Loops | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
l2_reg = keras.regularizers.l2(0.05)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="elu", kernel_initializer="he_normal",
kernel_regularizer=l2_reg),
keras.layers.Dense(1, kernel_regularizer=l2_reg)
])
def random_batch(X, y, batch_size=32):
idx = np.random.randint(len(X), size=batch_size)
return X[idx], y[idx]
def print_status_bar(iteration, total, loss, metrics=None):
metrics = " - ".join(["{}: {:.4f}".format(m.name, m.result())
for m in [loss] + (metrics or [])])
end = "" if iteration < total else "\n"
print("\r{}/{} - ".format(iteration, total) + metrics,
end=end)
import time
mean_loss = keras.metrics.Mean(name="loss")
mean_square = keras.metrics.Mean(name="mean_square")
for i in range(1, 50 + 1):
loss = 1 / i
mean_loss(loss)
mean_square(i ** 2)
print_status_bar(i, 50, mean_loss, [mean_square])
time.sleep(0.05) | 50/50 - loss: 0.0900 - mean_square: 858.5000
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
A fancier version with a progress bar: | def progress_bar(iteration, total, size=30):
running = iteration < total
c = ">" if running else "="
p = (size - 1) * iteration // total
fmt = "{{:-{}d}}/{{}} [{{}}]".format(len(str(total)))
params = [iteration, total, "=" * p + c + "." * (size - p - 1)]
return fmt.format(*params)
progress_bar(3500, 10000, size=6)
def print_status_bar(iteration, total, loss, metrics=None, size=30):
metrics = " - ".join(["{}: {:.4f}".format(m.name, m.result())
for m in [loss] + (metrics or [])])
end = "" if iteration < total else "\n"
print("\r{} - {}".format(progress_bar(iteration, total), metrics), end=end)
mean_loss = keras.metrics.Mean(name="loss")
mean_square = keras.metrics.Mean(name="mean_square")
for i in range(1, 50 + 1):
loss = 1 / i
mean_loss(loss)
mean_square(i ** 2)
print_status_bar(i, 50, mean_loss, [mean_square])
time.sleep(0.05)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
n_epochs = 5
batch_size = 32
n_steps = len(X_train) // batch_size
optimizer = keras.optimizers.Nadam(learning_rate=0.01)
loss_fn = keras.losses.mean_squared_error
mean_loss = keras.metrics.Mean()
metrics = [keras.metrics.MeanAbsoluteError()]
for epoch in range(1, n_epochs + 1):
print("Epoch {}/{}".format(epoch, n_epochs))
for step in range(1, n_steps + 1):
X_batch, y_batch = random_batch(X_train_scaled, y_train)
with tf.GradientTape() as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for variable in model.variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
mean_loss(loss)
for metric in metrics:
metric(y_batch, y_pred)
print_status_bar(step * batch_size, len(y_train), mean_loss, metrics)
print_status_bar(len(y_train), len(y_train), mean_loss, metrics)
for metric in [mean_loss] + metrics:
metric.reset_states()
try:
from tqdm.notebook import trange
from collections import OrderedDict
with trange(1, n_epochs + 1, desc="All epochs") as epochs:
for epoch in epochs:
with trange(1, n_steps + 1, desc="Epoch {}/{}".format(epoch, n_epochs)) as steps:
for step in steps:
X_batch, y_batch = random_batch(X_train_scaled, y_train)
with tf.GradientTape() as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for variable in model.variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
status = OrderedDict()
mean_loss(loss)
status["loss"] = mean_loss.result().numpy()
for metric in metrics:
metric(y_batch, y_pred)
status[metric.name] = metric.result().numpy()
steps.set_postfix(status)
for metric in [mean_loss] + metrics:
metric.reset_states()
except ImportError as ex:
print("To run this cell, please install tqdm, ipywidgets and restart Jupyter") | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
TensorFlow Functions | def cube(x):
return x ** 3
cube(2)
cube(tf.constant(2.0))
tf_cube = tf.function(cube)
tf_cube
tf_cube(2)
tf_cube(tf.constant(2.0)) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
TF Functions and Concrete Functions | concrete_function = tf_cube.get_concrete_function(tf.constant(2.0))
concrete_function.graph
concrete_function(tf.constant(2.0))
concrete_function is tf_cube.get_concrete_function(tf.constant(2.0)) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Exploring Function Definitions and Graphs | concrete_function.graph
ops = concrete_function.graph.get_operations()
ops
pow_op = ops[2]
list(pow_op.inputs)
pow_op.outputs
concrete_function.graph.get_operation_by_name('x')
concrete_function.graph.get_tensor_by_name('Identity:0')
concrete_function.function_def.signature | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
How TF Functions Trace Python Functions to Extract Their Computation Graphs | @tf.function
def tf_cube(x):
print("print:", x)
return x ** 3
result = tf_cube(tf.constant(2.0))
result
result = tf_cube(2)
result = tf_cube(3)
result = tf_cube(tf.constant([[1., 2.]])) # New shape: trace!
result = tf_cube(tf.constant([[3., 4.], [5., 6.]])) # New shape: trace!
result = tf_cube(tf.constant([[7., 8.], [9., 10.], [11., 12.]])) # New shape: trace!
| print: 2
print: 3
print: Tensor("x:0", shape=(1, 2), dtype=float32)
print: Tensor("x:0", shape=(2, 2), dtype=float32)
WARNING:tensorflow:5 out of the last 5 calls to <function tf_cube at 0x7fbfc0363440> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
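The retracing warning above already hints at one mitigation. As a hedged sketch (not part of the original notebook), `experimental_relax_shapes=True` asks `tf.function` to generalize over input shapes so that calls with new shapes can reuse a relaxed trace instead of triggering a fresh one:

```python
# Hypothetical variant of tf_cube that relaxes shapes to limit retracing.
@tf.function(experimental_relax_shapes=True)
def tf_cube_relaxed(x):
    return x ** 3

tf_cube_relaxed(tf.constant([[1., 2.]]))             # traced once
tf_cube_relaxed(tf.constant([[3., 4.], [5., 6.]]))   # can reuse a relaxed trace
```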
It is also possible to specify a particular input signature: | @tf.function(input_signature=[tf.TensorSpec([None, 28, 28], tf.float32)])
def shrink(images):
print("Tracing", images)
return images[:, ::2, ::2] # drop half the rows and columns
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
img_batch_1 = tf.random.uniform(shape=[100, 28, 28])
img_batch_2 = tf.random.uniform(shape=[50, 28, 28])
preprocessed_images = shrink(img_batch_1) # Traces the function.
preprocessed_images = shrink(img_batch_2) # Reuses the same concrete function.
img_batch_3 = tf.random.uniform(shape=[2, 2, 2])
try:
preprocessed_images = shrink(img_batch_3) # rejects unexpected types or shapes
except ValueError as ex:
print(ex) | Python inputs incompatible with input_signature:
inputs: (
tf.Tensor(
[[[0.7413678 0.62854624]
[0.01738465 0.3431449 ]]
[[0.51063764 0.3777541 ]
[0.07321596 0.02137029]]], shape=(2, 2, 2), dtype=float32))
input_signature: (
TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name=None))
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Using Autograph To Capture Control Flow A "static" `for` loop using `range()`: | @tf.function
def add_10(x):
for i in range(10):
x += 1
return x
add_10(tf.constant(5))
add_10.get_concrete_function(tf.constant(5)).graph.get_operations() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
A "dynamic" loop using `tf.while_loop()`: | @tf.function
def add_10(x):
condition = lambda i, x: tf.less(i, 10)
body = lambda i, x: (tf.add(i, 1), tf.add(x, 1))
final_i, final_x = tf.while_loop(condition, body, [tf.constant(0), x])
return final_x
add_10(tf.constant(5))
add_10.get_concrete_function(tf.constant(5)).graph.get_operations() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
A "dynamic" `for` loop using `tf.range()` (captured by autograph): | @tf.function
def add_10(x):
for i in tf.range(10):
x = x + 1
return x
add_10.get_concrete_function(tf.constant(0)).graph.get_operations() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Handling Variables and Other Resources in TF Functions | counter = tf.Variable(0)
@tf.function
def increment(counter, c=1):
return counter.assign_add(c)
increment(counter)
increment(counter)
function_def = increment.get_concrete_function(counter).function_def
function_def.signature.input_arg[0]
counter = tf.Variable(0)
@tf.function
def increment(c=1):
return counter.assign_add(c)
increment()
increment()
function_def = increment.get_concrete_function().function_def
function_def.signature.input_arg[0]
class Counter:
def __init__(self):
self.counter = tf.Variable(0)
@tf.function
def increment(self, c=1):
return self.counter.assign_add(c)
c = Counter()
c.increment()
c.increment()
@tf.function
def add_10(x):
for i in tf.range(10):
x += 1
return x
print(tf.autograph.to_code(add_10.python_function))
def display_tf_code(func):
from IPython.display import display, Markdown
if hasattr(func, "python_function"):
func = func.python_function
code = tf.autograph.to_code(func)
display(Markdown('```python\n{}\n```'.format(code)))
display_tf_code(add_10) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Using TF Functions with tf.keras (or Not) By default, tf.keras will automatically convert your custom code into TF Functions, no need to use `tf.function()`: | # Custom loss function
def my_mse(y_true, y_pred):
print("Tracing loss my_mse()")
return tf.reduce_mean(tf.square(y_pred - y_true))
# Custom metric function
def my_mae(y_true, y_pred):
print("Tracing metric my_mae()")
return tf.reduce_mean(tf.abs(y_pred - y_true))
# Custom layer
class MyDense(keras.layers.Layer):
def __init__(self, units, activation=None, **kwargs):
super().__init__(**kwargs)
self.units = units
self.activation = keras.activations.get(activation)
def build(self, input_shape):
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.units),
initializer='uniform',
trainable=True)
self.biases = self.add_weight(name='bias',
shape=(self.units,),
initializer='zeros',
trainable=True)
super().build(input_shape)
def call(self, X):
print("Tracing MyDense.call()")
return self.activation(X @ self.kernel + self.biases)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
# Custom model
class MyModel(keras.models.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.hidden1 = MyDense(30, activation="relu")
self.hidden2 = MyDense(30, activation="relu")
self.output_ = MyDense(1)
def call(self, input):
print("Tracing MyModel.call()")
hidden1 = self.hidden1(input)
hidden2 = self.hidden2(hidden1)
concat = keras.layers.concatenate([input, hidden2])
output = self.output_(concat)
return output
model = MyModel()
model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae])
model.fit(X_train_scaled, y_train, epochs=2,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test) | Epoch 1/2
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
340/363 [===========================>..] - ETA: 0s - loss: 2.8762 - my_mae: 1.2771Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
363/363 [==============================] - 1s 1ms/step - loss: 2.7755 - my_mae: 1.2455 - val_loss: 0.5569 - val_my_mae: 0.4819
Epoch 2/2
363/363 [==============================] - 0s 802us/step - loss: 0.4697 - my_mae: 0.4911 - val_loss: 0.4664 - val_my_mae: 0.4576
162/162 [==============================] - 0s 469us/step - loss: 0.4164 - my_mae: 0.4639
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
You can turn this off by creating the model with `dynamic=True` (or calling `super().__init__(dynamic=True, **kwargs)` in the model's constructor): | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = MyModel(dynamic=True)
model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae]) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
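The constructor-based alternative mentioned above is not shown in code; here is a minimal hedged sketch (the `MyDynamicModel` name is an assumption, not part of the original notebook):

```python
# Hypothetical subclass that marks itself as dynamic in its own constructor,
# so callers don't need to pass dynamic=True at instantiation time.
class MyDynamicModel(MyModel):
    def __init__(self, **kwargs):
        super().__init__(dynamic=True, **kwargs)

model = MyDynamicModel()
model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae])
```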
Now the custom code will be called at each iteration. Let's fit, validate, and evaluate with tiny datasets to avoid getting too much output: | model.fit(X_train_scaled[:64], y_train[:64], epochs=1,
validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)
model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0) | Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Alternatively, you can compile a model with `run_eagerly=True`: | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = MyModel()
model.compile(loss=my_mse, optimizer="nadam", metrics=[my_mae], run_eagerly=True)
model.fit(X_train_scaled[:64], y_train[:64], epochs=1,
validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)
model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0) | Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
Tracing MyModel.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing MyDense.call()
Tracing loss my_mse()
Tracing metric my_mae()
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Custom Optimizers Defining custom optimizers is not very common, but in case you are one of the happy few who gets to write one, here is an example: | class MyMomentumOptimizer(keras.optimizers.Optimizer):
def __init__(self, learning_rate=0.001, momentum=0.9, name="MyMomentumOptimizer", **kwargs):
"""Call super().__init__() and use _set_hyper() to store hyperparameters"""
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate)) # handle lr=learning_rate
self._set_hyper("decay", self._initial_decay) #
self._set_hyper("momentum", momentum)
def _create_slots(self, var_list):
"""For each model variable, create the optimizer variable associated with it.
TensorFlow calls these optimizer variables "slots".
For momentum optimization, we need one momentum slot per model variable.
"""
for var in var_list:
self.add_slot(var, "momentum")
@tf.function
def _resource_apply_dense(self, grad, var):
"""Update the slots and perform one optimization step for one model variable
"""
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype) # handle learning rate decay
momentum_var = self.get_slot(var, "momentum")
momentum_hyper = self._get_hyper("momentum", var_dtype)
momentum_var.assign(momentum_var * momentum_hyper - (1. - momentum_hyper)* grad)
var.assign_add(momentum_var * lr_t)
def _resource_apply_sparse(self, grad, var):
raise NotImplementedError
def get_config(self):
base_config = super().get_config()
return {
**base_config,
"learning_rate": self._serialize_hyperparameter("learning_rate"),
"decay": self._serialize_hyperparameter("decay"),
"momentum": self._serialize_hyperparameter("momentum"),
}
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=[8])])
model.compile(loss="mse", optimizer=MyMomentumOptimizer())
model.fit(X_train_scaled, y_train, epochs=5) | Epoch 1/5
363/363 [==============================] - 0s 444us/step - loss: 4.9648
Epoch 2/5
363/363 [==============================] - 0s 444us/step - loss: 1.7888
Epoch 3/5
363/363 [==============================] - 0s 437us/step - loss: 1.0021
Epoch 4/5
363/363 [==============================] - 0s 451us/step - loss: 0.7869
Epoch 5/5
363/363 [==============================] - 0s 446us/step - loss: 0.7122
| Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
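As a quick hedged sanity check (not part of the original notebook), the custom optimizer can be compared against Keras's built-in momentum SGD; because the custom update rule scales the gradient by `(1 - momentum)`, the loss curves should be broadly similar rather than identical:

```python
# Hypothetical baseline run with the built-in momentum optimizer for comparison.
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)

baseline = keras.models.Sequential([keras.layers.Dense(1, input_shape=[8])])
baseline.compile(loss="mse",
                 optimizer=keras.optimizers.SGD(learning_rate=0.001, momentum=0.9))
baseline.fit(X_train_scaled, y_train, epochs=5)
```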
Exercises 1. to 11. See Appendix A. 12. Implement a custom layer that performs _Layer Normalization_ _We will use this type of layer in Chapter 15 when using Recurrent Neural Networks._ a. _Exercise: The `build()` method should define two trainable weights *α* and *β*, both of shape `input_shape[-1:]` and data type `tf.float32`. *α* should be initialized with 1s, and *β* with 0s._ Solution: see below. b. _Exercise: The `call()` method should compute the mean_ μ _and standard deviation_ σ _of each instance's features. For this, you can use `tf.nn.moments(inputs, axes=-1, keepdims=True)`, which returns the mean μ and the variance σ² of all instances (compute the square root of the variance to get the standard deviation). Then the function should compute and return *α*⊗(*X* - μ)/(σ + ε) + *β*, where ⊗ represents itemwise multiplication (`*`) and ε is a smoothing term (small constant to avoid division by zero, e.g., 0.001)._ | class LayerNormalization(keras.layers.Layer):
def __init__(self, eps=0.001, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def build(self, batch_input_shape):
self.alpha = self.add_weight(
name="alpha", shape=batch_input_shape[-1:],
initializer="ones")
self.beta = self.add_weight(
name="beta", shape=batch_input_shape[-1:],
initializer="zeros")
super().build(batch_input_shape) # must be at the end
def call(self, X):
mean, variance = tf.nn.moments(X, axes=-1, keepdims=True)
return self.alpha * (X - mean) / (tf.sqrt(variance + self.eps)) + self.beta
def compute_output_shape(self, batch_input_shape):
return batch_input_shape
def get_config(self):
base_config = super().get_config()
return {**base_config, "eps": self.eps} | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Note that making _ε_ a hyperparameter (`eps`) was not compulsory. Also note that it's preferable to compute `tf.sqrt(variance + self.eps)` rather than `tf.sqrt(variance) + self.eps`. Indeed, the derivative of sqrt(z) is undefined when z=0, so training will bomb whenever the variance vector has at least one component equal to 0. Adding _ε_ within the square root guarantees that this will never happen (a small demonstration follows the next cell). c. _Exercise: Ensure that your custom layer produces the same (or very nearly the same) output as the `keras.layers.LayerNormalization` layer._ Let's create one instance of each class, apply them to some data (e.g., the training set), and ensure that the difference is negligible. | X = X_train.astype(np.float32)
custom_layer_norm = LayerNormalization()
keras_layer_norm = keras.layers.LayerNormalization()
tf.reduce_mean(keras.losses.mean_absolute_error(
keras_layer_norm(X), custom_layer_norm(X))) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
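As promised above, here is a tiny hedged demonstration (not part of the original notebook) of why ε belongs inside the square root — at zero variance the gradient of `tf.sqrt(v) + eps` blows up, while `tf.sqrt(v + eps)` stays finite:

```python
# Hypothetical check of the two epsilon placements at zero variance.
v = tf.Variable(0.)
eps = 0.001
with tf.GradientTape(persistent=True) as tape:
    unsafe = tf.sqrt(v) + eps   # derivative of sqrt is unbounded at v = 0
    safe = tf.sqrt(v + eps)     # epsilon inside keeps the gradient finite
print(tape.gradient(unsafe, v))  # inf -> training would diverge
print(tape.gradient(safe, v))    # ~15.8, a finite value
del tape
```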
Back to the comparison above: that difference is close enough. To be extra sure, let's make alpha and beta completely random and compare again: | random_alpha = np.random.rand(X.shape[-1])
random_beta = np.random.rand(X.shape[-1])
custom_layer_norm.set_weights([random_alpha, random_beta])
keras_layer_norm.set_weights([random_alpha, random_beta])
tf.reduce_mean(keras.losses.mean_absolute_error(
keras_layer_norm(X), custom_layer_norm(X))) | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Still a negligible difference! Our custom layer works fine. 13. Train a model using a custom training loop to tackle the Fashion MNIST dataset _The Fashion MNIST dataset was introduced in Chapter 10._ a. _Exercise: Display the epoch, iteration, mean training loss, and mean accuracy over each epoch (updated at each iteration), as well as the validation loss and accuracy at the end of each epoch._ | (X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full.astype(np.float32) / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test.astype(np.float32) / 255.
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax"),
])
n_epochs = 5
batch_size = 32
n_steps = len(X_train) // batch_size
optimizer = keras.optimizers.Nadam(learning_rate=0.01)
loss_fn = keras.losses.sparse_categorical_crossentropy
mean_loss = keras.metrics.Mean()
metrics = [keras.metrics.SparseCategoricalAccuracy()]
with trange(1, n_epochs + 1, desc="All epochs") as epochs:
for epoch in epochs:
with trange(1, n_steps + 1, desc="Epoch {}/{}".format(epoch, n_epochs)) as steps:
for step in steps:
X_batch, y_batch = random_batch(X_train, y_train)
with tf.GradientTape() as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for variable in model.variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
status = OrderedDict()
mean_loss(loss)
status["loss"] = mean_loss.result().numpy()
for metric in metrics:
metric(y_batch, y_pred)
status[metric.name] = metric.result().numpy()
steps.set_postfix(status)
y_pred = model(X_valid)
status["val_loss"] = np.mean(loss_fn(y_valid, y_pred))
status["val_accuracy"] = np.mean(keras.metrics.sparse_categorical_accuracy(
tf.constant(y_valid, dtype=np.float32), y_pred))
steps.set_postfix(status)
for metric in [mean_loss] + metrics:
metric.reset_states()
| _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
b. _Exercise: Try using a different optimizer with a different learning rate for the upper layers and the lower layers._ | keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
lower_layers = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(100, activation="relu"),
])
upper_layers = keras.models.Sequential([
keras.layers.Dense(10, activation="softmax"),
])
model = keras.models.Sequential([
lower_layers, upper_layers
])
lower_optimizer = keras.optimizers.SGD(learning_rate=1e-4)
upper_optimizer = keras.optimizers.Nadam(learning_rate=1e-3)
n_epochs = 5
batch_size = 32
n_steps = len(X_train) // batch_size
loss_fn = keras.losses.sparse_categorical_crossentropy
mean_loss = keras.metrics.Mean()
metrics = [keras.metrics.SparseCategoricalAccuracy()]
with trange(1, n_epochs + 1, desc="All epochs") as epochs:
for epoch in epochs:
with trange(1, n_steps + 1, desc="Epoch {}/{}".format(epoch, n_epochs)) as steps:
for step in steps:
X_batch, y_batch = random_batch(X_train, y_train)
with tf.GradientTape(persistent=True) as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
for layers, optimizer in ((lower_layers, lower_optimizer),
(upper_layers, upper_optimizer)):
gradients = tape.gradient(loss, layers.trainable_variables)
optimizer.apply_gradients(zip(gradients, layers.trainable_variables))
del tape
for variable in model.variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
status = OrderedDict()
mean_loss(loss)
status["loss"] = mean_loss.result().numpy()
for metric in metrics:
metric(y_batch, y_pred)
status[metric.name] = metric.result().numpy()
steps.set_postfix(status)
y_pred = model(X_valid)
status["val_loss"] = np.mean(loss_fn(y_valid, y_pred))
status["val_accuracy"] = np.mean(keras.metrics.sparse_categorical_accuracy(
tf.constant(y_valid, dtype=np.float32), y_pred))
steps.set_postfix(status)
for metric in [mean_loss] + metrics:
metric.reset_states() | _____no_output_____ | Apache-2.0 | 12_custom_models_and_training_with_tensorflow.ipynb | mattkearns/handson-ml2 |
Binary classifiers for sold eBay shoe listings Connect to database and retrieve data | from sqlalchemy import create_engine
import pandas as pd
from decouple import config
DATABASE_URL = config('DATABASE_URL')
engine = create_engine(DATABASE_URL)
df = pd.read_sql_query('select * from "shoes"',con=engine) | _____no_output_____ | MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
Data Cleaning Replace missing values with average value | price_fillna_value = round(df["price"].mean(),2)
free_shipping_fillna_value = int(df["free_shipping"].mean())
total_images_fillna_value = int(df["total_images"].mean())
seller_rating_fillna_value = int(df["seller_rating"].mean())
shoe_size_fillna_value = int(df["shoe_size"].mean())
df["price"].fillna(price_fillna_value,inplace=True)
df["free_shipping"].fillna(free_shipping_fillna_value,inplace=True)
df["total_images"].fillna(total_images_fillna_value,inplace=True)
df["seller_rating"].fillna(seller_rating_fillna_value,inplace=True)
df["shoe_size"].fillna(shoe_size_fillna_value,inplace=True) | _____no_output_____ | MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
Define input and output features | from sklearn.model_selection import train_test_split
import numpy as np
features = ['price','free_shipping', 'total_images', 'seller_rating', 'shoe_size', 'desc_fre_score', 'desc_avg_grade_score']
X = np.array(df[features])
y = np.array(df['sold'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) | _____no_output_____ | MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
Train Classification Models Logistic Regression | from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import roc_curve, auc, roc_auc_score, f1_score
reg_log = LogisticRegression()
reg_log.fit(X_train, y_train)
y_pred = reg_log.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
print("roc_auc_score: ", roc_auc_score(y_test, y_pred))
print("f1 score: ", f1_score(y_test, y_pred)) | precision recall f1-score support
False 0.70 1.00 0.83 45
True 0.00 0.00 0.00 19
accuracy 0.70 64
macro avg 0.35 0.50 0.41 64
weighted avg 0.49 0.70 0.58 64
roc_auc_score: 0.5
f1 score: 0.0
| MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
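The zero recall on the `True` class above indicates the default logistic regression is predicting the majority (unsold) class for every test listing. One hedged mitigation sketch (not part of the original notebook) is to standardize the features and reweight the classes:

```python
# Hypothetical variant: scale features and reweight classes so the minority
# (sold) class contributes proportionally more to the loss.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

reg_log_bal = make_pipeline(
    StandardScaler(),
    LogisticRegression(class_weight="balanced", max_iter=1000),
)
reg_log_bal.fit(X_train, y_train)
print(metrics.classification_report(y_test, reg_log_bal.predict(X_test)))
```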
Random Forest | from sklearn.ensemble import RandomForestClassifier
reg_rf = RandomForestClassifier()
reg_rf.fit(X_train, y_train)
y_pred = reg_rf.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
print("roc_auc_score: ", roc_auc_score(y_test, y_pred))
print("f1 score: ", f1_score(y_test, y_pred))
feature_df = pd.DataFrame({'Importance':reg_rf.feature_importances_, 'Features': features })
print(feature_df) | Importance Features
0 0.172318 price
1 0.028320 free_shipping
2 0.177162 total_images
3 0.181760 seller_rating
4 0.130010 shoe_size
5 0.174685 desc_fre_score
6 0.135746 desc_avg_grade_score
| MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
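Before interpreting the importances, a small hedged addition (not in the original notebook) sorts them from highest to lowest to make the ranking explicit:

```python
# Hypothetical: display the random forest importances in descending order.
print(feature_df.sort_values("Importance", ascending=False))
```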
Given these feature importance values, a seller's rating has the most influence on whether a shoe will sell, while free shipping has the least influence. SVM | from sklearn.svm import SVC
reg_svc = SVC()
reg_svc.fit(X_train, y_train)
y_pred = reg_svc.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
print("roc_auc_score: ", roc_auc_score(y_test, y_pred))
print("f1 score: ", f1_score(y_test, y_pred)) | precision recall f1-score support
False 0.70 1.00 0.83 45
True 0.00 0.00 0.00 19
accuracy 0.70 64
macro avg 0.35 0.50 0.41 64
weighted avg 0.49 0.70 0.58 64
roc_auc_score: 0.5
f1 score: 0.0
| MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
K-Nearest Neighbors | from sklearn.neighbors import KNeighborsClassifier
reg_knn = KNeighborsClassifier()
reg_knn.fit(X_train, y_train)
y_pred = reg_knn.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
print("roc_auc_score: ", roc_auc_score(y_test, y_pred))
print("f1 score: ", f1_score(y_test, y_pred)) | precision recall f1-score support
False 0.75 0.91 0.82 45
True 0.56 0.26 0.36 19
accuracy 0.72 64
macro avg 0.65 0.59 0.59 64
weighted avg 0.69 0.72 0.68 64
roc_auc_score: 0.5871345029239766
f1 score: 0.35714285714285715
| MIT | ml-work/classification_problem.ipynb | numankh/HypeBeastHelper |
phageParser - Analysis of Spacer Lengths C.K. Yildirim ([email protected]) The latest version of this [IPython notebook](http://ipython.org/notebook.html) demo is available at [http://github.com/phageParser/phageParser](https://github.com/phageParser/phageParser/tree/django-dev/demos) To run this notebook locally: * `git clone` or [download](https://github.com/phageParser/phageParser/archive/master.zip) this repository * Install [Jupyter Notebook](http://jupyter.org/install.html) * In a command prompt, type `jupyter notebook` - the notebook server will launch in your browser * Navigate to the phageParser/demos folder and open the notebook Introduction This demo uses the REST API of phageParser to plot the distribution of spacer lengths. In this case, the API is consumed using the requests library and the json responses are parsed for gathering basepair length information of spacers. | %matplotlib inline
#Import packages
import requests
import json
import numpy as np
import random
import matplotlib.pyplot as plt
from matplotlib import mlab
import seaborn as sns
import pandas as pd
from scipy.stats import poisson
sns.set_palette("husl")
#Url of the phageParser API
apiurl = 'https://phageparser.herokuapp.com'
#Get the initial page for listing of accessible objects and get url for spacers
r=requests.get(apiurl)
spacerurl = r.json()['organisms']
#API is accessible by chunks of spacers that are in each page, get the total number of pages from meta field
r=requests.get(spacerurl)
last_page = r.json()['meta']['total_pages']
#Iterate through each page and store json response which only has length of spacers information
jbatches = []
for page in range(1,last_page):
#Exclude every field on spacer object other than length and move to a certain page
batch_url = spacerurl+'?page={}&exclude[]=*&include[]=loci.spacers.length'.format(page)
spacer_batch = requests.get(batch_url).json()
jbatches.append(spacer_batch)
#Get lengths of spacers per locus
org_spacer={}
for batch in jbatches:
for organism in batch['organisms']:
locusspacerlens = {}
if organism['loci'] == []:
continue
orgid = organism['loci'][0]['organism']
for locus in organism['loci']:
spacerlens = []
for spacer in locus['spacers']:
spacerlens.append(spacer['length'])
locusspacerlens[locus['id']]=np.array(spacerlens)
org_spacer[orgid] = locusspacerlens
#Get the global mean and variance
spacerbplengths = np.array([spacerlen for organism in org_spacer.values() for locusspacerlen in organism.values() for spacerlen in locusspacerlen]).flatten()
mu, sigma = spacerbplengths.mean(), spacerbplengths.std()
print("Calculated mean basepair length for spacers is {:.2f}+/-{:.2f}".format(mu,sigma)) | Calculated mean basepair length for spacers is 35.11+/-3.95
| MIT | demos/Spacer Length Analysis.ipynb | nataliyah123/phageParser |
Across the roughly 3,000 sequenced organisms that have what looks like a CRISPR locus, what is the distribution of CRISPR spacer lengths? The histogram below shows that spacer length peaks at about 35 base pairs. The standard deviation of spacer length is 4 base pairs, but the distribution has large tails - there are many more long spacers than would be expected if the lengths were normally distributed (black dashed line) or Poisson distributed (red dashed line). Individual organisms (colours other than blue) have tighter distributions than the overall distribution. | #Plot histogram of spacer lengths across all organisms
norm = False # change to false to show totals, true to show everything normalized to 1
plt.figure()
bins=range(5,100)
plt.hist(spacerbplengths,bins=bins,normed=norm,label='All organisms')
plt.yscale('log')
if norm == False:
plt.ylim(5*10**-1,10**5)
else:
plt.ylim(10**-6,10**0)
plt.xlim(10,100)
#Plot normal and poisson distribution of length
x=np.unique(spacerbplengths)
if norm == False:
y = mlab.normpdf(x, mu, sigma)*len(spacerbplengths)
y2 = poisson.pmf(x,mu)*len(spacerbplengths)
else:
y = mlab.normpdf(x, mu, sigma)
y2 = poisson.pmf(x,mu)
plt.plot(x, y, 'k--', linewidth=1.5, label='Normal distribution')
plt.plot(x, y2, 'r--',linewidth=1.5, label='Poissson distribution')
#Plot histogram for a single organism
for i in range(4):
org_id = random.choice(list(org_spacer.keys()))
orgspacerlens = np.concatenate(list(org_spacer[org_id].values()))
plt.hist(orgspacerlens,bins=bins, normed=norm)
plt.ylabel("Number of spacers")
plt.xlabel("Spacer length")
plt.legend(); | _____no_output_____ | MIT | demos/Spacer Length Analysis.ipynb | nataliyah123/phageParser |
What the above plot suggests is that individual organisms and loci have narrow spacer length distributions but that the total distribution is quite broad. | #Calculate means and standard deviations of spacer length for all individual loci
means = []
stds = []
for org in org_spacer.values():
for arr in list(org.values()):
means.append(np.mean(arr))
stds.append(np.std(arr))
print("The mean of all individual locus standard deviations is "
+ str(round(np.mean(stds),2))
+ ", smaller than the spacer length standard deviations for all organisms combined.")
plt.figure()
plt.hist(stds,bins=range(0,30))
plt.xlabel("Standard deviation of spacer length within locus")
plt.ylabel("Number of loci")
plt.ylim(8*10**-1,10**4)
plt.yscale('log'); | The mean of all individual locus standard deviations is 1.31, smaller than the spacer length standard deviations for all organisms combined.
| MIT | demos/Spacer Length Analysis.ipynb | nataliyah123/phageParser |
The following cumulative version of the total spacer length histogram again shows the deviation from the normal distribution at large spacer lengths. | fig, ax = plt.subplots(figsize=(8,4), dpi=100)
#Plot cumulative probability of data
sorted_data = np.sort(spacerbplengths)
ax.step(sorted_data, 1-np.arange(sorted_data.size)/sorted_data.size, label='Data')
#Plot normal distribution
x=np.unique(sorted_data)
y = mlab.normpdf(x, mu, sigma).cumsum()
y /= y[-1]
ax.plot(x, 1-y, 'k--', linewidth=0.5, label='Normal distribution')
#Format the figure and label
ax.set_yscale('log')
ax.grid(True)
ax.legend(loc='right')
ax.set_title('Cumulative step distribution of spacer lengths')
ax.set_xlabel("Spacer length (bps)")
ax.set_ylabel('Likelihood of occurrence of smaller spacers')
plt.show()
#Pick a random organism to plot the histogram for each locus
org_id = random.choice(list(org_spacer.keys()))
org_id = 594  # override the random choice with a fixed organism id so the figure is reproducible
locusspacerlens = org_spacer[org_id]
fig, ax = plt.subplots(figsize=(8,4),dpi=100)
bins=range(30,45,1)
#Plot histogram of spacer length frequency
for loc in locusspacerlens:
sns.distplot(locusspacerlens[loc], ax=ax, kde=False, norm_hist=True, bins=bins)
plt.xlim([30,45])
#format the figure and label
ax.set_title("Histogram of spacer basepair lengths for organism with id {}".format(org_id))
ax.set_xlabel("Spacer length (bps)")
ax.set_ylabel("Occurence of spacers")
plt.show() | _____no_output_____ | MIT | demos/Spacer Length Analysis.ipynb | nataliyah123/phageParser |
Fraud Detection for Automobile Claims: Create an End to End Pipeline Background In this notebook, we will build a SageMaker Pipeline that automates the entire end-to-end process of preparing, training, and deploying a model that detects automobile claim fraud. For a more detailed explanation of each step of the pipeline, you can look at the series of notebooks (listed below) that implements this same process using a manual approach. Please see the [README.md](README.md) for more information about this use case implemented by this series of notebooks. 1. [Fraud Detection for Automobile Claims: Data Exploration](./0-AutoClaimFraudDetection.ipynb) 1. [Fraud Detection for Automobile Claims: Data Preparation, Process, and Store Features](./1-data-prep-e2e.ipynb) 1. [Fraud Detection for Automobile Claims: Train, Check Bias, Tune, Record Lineage, and Register a Model](./2-lineage-train-assess-bias-tune-registry-e2e.ipynb) 1. [Fraud Detection for Automobile Claims: Mitigate Bias, Train, Register, and Deploy Unbiased Model](./3-mitigate-bias-train-model2-registry-e2e.ipynb) Contents 1. [Prerequisites](Prerequisites) 1. [Architecture: Create a SageMaker Pipeline to Automate All the Steps from Data Prep to Model Deployment](Architecture:-Create-a-SageMaker-Pipeline-to-Automate-All-the-Steps-from-Data-Prep-to-Model-Deployment) 1. [Creating an Automated Pipeline using SageMaker Pipeline](Creating-an-Automated-Pipeline-using-SageMaker-Pipeline) 1. [Clean-Up](Clean-Up) Prerequisites ---- Install required and/or update third-party libraries | !python -m pip install -Uq pip
!python -m pip install -q awswrangler==2.2.0 imbalanced-learn==0.7.0 sagemaker==2.41.0 boto3==1.17.70 | _____no_output_____ | Apache-2.0 | end_to_end/fraud_detection/pipeline-e2e.ipynb | qidewenwhen/amazon-sagemaker-examples |
Import libraries | import json
import boto3
import pathlib
import sagemaker
import numpy as np
import pandas as pd
import awswrangler as wr
import string
import demo_helpers
from sagemaker.xgboost.estimator import XGBoost
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.steps import CreateModelStep
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.workflow.steps import ProcessingStep, TrainingStep
from sagemaker.workflow.parameters import ParameterInteger, ParameterFloat, ParameterString | _____no_output_____ | Apache-2.0 | end_to_end/fraud_detection/pipeline-e2e.ipynb | qidewenwhen/amazon-sagemaker-examples |
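As a hedged sketch of how the imported parameter classes are typically used (the parameter names and default values below are assumptions, not taken from the original notebook), SageMaker Pipeline parameters are declared up front so they can be overridden at pipeline execution time:

```python
# Hypothetical pipeline parameters; names and defaults are illustrative only.
train_instance_type = ParameterString(name="TrainingInstanceType",
                                      default_value="ml.m4.xlarge")
model_approval_status = ParameterString(name="ModelApprovalStatus",
                                        default_value="PendingManualApproval")
```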