<a href="https://colab.research.google.com/github/OUCTheoryGroup/colab_demo/blob/master/11_MixUp_ICLR2018.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
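This notebook demonstrates mixup (Zhang et al., ICLR 2018) on CIFAR-10 with a small LeNet. Mixup trains on convex combinations of pairs of training examples: with $\lambda \sim \mathrm{Beta}(\alpha, \alpha)$,

$$\tilde{x} = \lambda x_i + (1-\lambda) x_j, \qquad \mathcal{L} = \lambda\,\ell(f(\tilde{x}), y_i) + (1-\lambda)\,\ell(f(\tilde{x}), y_j),$$

which is what `mixup_data` and `mixup_criterion` in the cell below implement.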
```
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
# LeNet model
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.max_pool2d(out, 2)
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
out = self.fc3(out)
return out
def mixup_data(x, y, alpha=1.0):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
index = torch.randperm(batch_size).cuda()
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
transform_train = transforms.Compose([ transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([ transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
testset = datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=2)
# The key MixUp hyperparameter: alpha
alpha = 0.5
net = LeNet().cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)
# Train the network
net.train()
for epoch in range(30):
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets_a, targets_b, lam = mixup_data(inputs, targets, alpha)
outputs = net(inputs)
loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
train_loss += loss.data
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()
+ (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Epoch: %d | Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (epoch+1, train_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Test the network
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
print(' Test Accuracy: %.3f %%' % (100.*correct/total))
```
# Object Detection
This lab is similar to the previous lab, except now instead of printing out the bounding box coordinates, you can visualize these bounding boxes on top of the image!
## Setup
```
# For running inference on the TF-Hub module.
import tensorflow as tf
import tensorflow_hub as hub
# For downloading the image.
import matplotlib.pyplot as plt
import tempfile
from six.moves.urllib.request import urlopen
from six import BytesIO
# For drawing onto the image.
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
# For measuring the inference time.
import time
# Print Tensorflow version
print(tf.__version__)
# Check available GPU devices.
print("The following GPU devices are available: %s" % tf.test.gpu_device_name())
```
### Select and load the model
As in the previous lab, you can choose an object detection module. Here are two that we've selected for you:
* [ssd + mobilenet V2](https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2): small and fast.
* [FasterRCNN + InceptionResNet V2](https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1): high accuracy.
```
# you can switch the commented lines here to pick the other model
# ssd mobilenet version 2
module_handle = "https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1"
# You can choose inception resnet version 2 instead
#module_handle = "https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1"
```
#### Load the model
Next, you'll load the model specified by the `module_handle`.
- Loading the model may take a few minutes.
```
model = hub.load(module_handle)
```
#### Choose the default signature
As before, you can check the available signatures using `.signatures.keys()`
```
# take a look at the available signatures for this particular model
model.signatures.keys()
```
Please choose the 'default' signature for your object detector.
```
detector = model.signatures['default']
```
### download_and_resize_image
As you saw in the previous lab, this function downloads an image from a given URL, pre-processes it, and saves it to disk.
- What's new compared to the previous lab is that you can display the image if you set the parameter `display=True`.
```
def display_image(image):
"""
Displays an image inside the notebook.
This is used by download_and_resize_image()
"""
fig = plt.figure(figsize=(20, 15))
plt.grid(False)
plt.imshow(image)
def download_and_resize_image(url, new_width=256, new_height=256, display=False):
'''
Fetches an image online, resizes it and saves it locally.
Args:
url (string) -- link to the image
new_width (int) -- size in pixels used for resizing the width of the image
new_height (int) -- size in pixels used for resizing the length of the image
Returns:
(string) -- path to the saved image
'''
# create a temporary file ending with ".jpg"
_, filename = tempfile.mkstemp(suffix=".jpg")
# opens the given URL
response = urlopen(url)
# reads the image fetched from the URL
image_data = response.read()
# puts the image data in memory buffer
image_data = BytesIO(image_data)
# opens the image
pil_image = Image.open(image_data)
    # resizes the image; will crop if the aspect ratio is different
    # (Image.LANCZOS is the current name for the filter formerly called Image.ANTIALIAS)
    pil_image = ImageOps.fit(pil_image, (new_width, new_height), Image.LANCZOS)
# converts to the RGB colorspace
pil_image_rgb = pil_image.convert("RGB")
# saves the image to the temporary file created earlier
pil_image_rgb.save(filename, format="JPEG", quality=90)
print("Image downloaded to %s." % filename)
if display:
display_image(pil_image)
return filename
```
### Select and load an image
Load a public image from Open Images v4, save locally, and display.
```
# By Heiko Gorski, Source: https://commons.wikimedia.org/wiki/File:Naxos_Taverna.jpg
image_url = "https://upload.wikimedia.org/wikipedia/commons/6/60/Naxos_Taverna.jpg" #@param
downloaded_image_path = download_and_resize_image(image_url, 1280, 856, True)
```
### Draw bounding boxes
To build on what you saw in the previous lab, you can now visualize the predicted bounding boxes, overlaid on top of the image.
- You can use `draw_boxes` to do this. It will use `draw_bounding_box_on_image` to draw the bounding boxes.
```
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color,
font,
thickness=4,
display_str_list=()):
"""
Adds a bounding box to an image.
Args:
image -- the image object
ymin -- bounding box coordinate
xmin -- bounding box coordinate
ymax -- bounding box coordinate
xmax -- bounding box coordinate
color -- color for the bounding box edges
font -- font for class label
thickness -- edge thickness of the bounding box
display_str_list -- class labels for each object detected
Returns:
No return. The function modifies the `image` argument
that gets passed into this function
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
# scale the bounding box coordinates to the height and width of the image
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
# define the four edges of the detection box
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = top + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle([(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom)],
fill=color)
draw.text((left + margin, text_bottom - text_height - margin),
display_str,
fill="black",
font=font)
text_bottom -= text_height - 2 * margin
def draw_boxes(image, boxes, class_names, scores, max_boxes=10, min_score=0.1):
"""
Overlay labeled boxes on an image with formatted scores and label names.
Args:
image -- the image as a numpy array
boxes -- list of detection boxes
class_names -- list of classes for each detected object
scores -- numbers showing the model's confidence in detecting that object
max_boxes -- maximum detection boxes to overlay on the image (default is 10)
min_score -- minimum score required to display a bounding box
Returns:
image -- the image after detection boxes and classes are overlaid on the original image.
"""
colors = list(ImageColor.colormap.values())
try:
font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf",
25)
except IOError:
print("Font not found, using default font.")
font = ImageFont.load_default()
for i in range(min(boxes.shape[0], max_boxes)):
# only display detection boxes that have the minimum score or higher
if scores[i] >= min_score:
ymin, xmin, ymax, xmax = tuple(boxes[i])
display_str = "{}: {}%".format(class_names[i].decode("ascii"),
int(100 * scores[i]))
color = colors[hash(class_names[i]) % len(colors)]
image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
# draw one bounding box and overlay the class labels onto the image
draw_bounding_box_on_image(image_pil,
ymin,
xmin,
ymax,
xmax,
color,
font,
display_str_list=[display_str])
np.copyto(image, np.array(image_pil))
return image
```
### run_detector
This function will take in the object detection model `detector` and the path to a sample image, then use this model to detect objects.
- This time, `run_detector` also calls `draw_boxes` to draw the predicted bounding boxes.
```
def load_img(path):
'''
Loads a JPEG image and converts it to a tensor.
Args:
path (string) -- path to a locally saved JPEG image
Returns:
(tensor) -- an image tensor
'''
# read the file
img = tf.io.read_file(path)
# convert to a tensor
img = tf.image.decode_jpeg(img, channels=3)
return img
def run_detector(detector, path):
'''
Runs inference on a local file using an object detection model.
Args:
detector (model) -- an object detection model loaded from TF Hub
path (string) -- path to an image saved locally
'''
# load an image tensor from a local file path
img = load_img(path)
# add a batch dimension in front of the tensor
converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
# run inference using the model
start_time = time.time()
result = detector(converted_img)
end_time = time.time()
# save the results in a dictionary
result = {key:value.numpy() for key,value in result.items()}
# print results
print("Found %d objects." % len(result["detection_scores"]))
print("Inference time: ", end_time-start_time)
# draw predicted boxes over the image
image_with_boxes = draw_boxes(
img.numpy(), result["detection_boxes"],
result["detection_class_entities"], result["detection_scores"])
# display the image
display_image(image_with_boxes)
```
### Run the detector on your selected image!
```
run_detector(detector, downloaded_image_path)
```
### Run the detector on more images
Perform inference on some additional images of your choice and check how long inference takes.
```
image_urls = [
# Source: https://commons.wikimedia.org/wiki/File:The_Coleoptera_of_the_British_islands_(Plate_125)_(8592917784).jpg
"https://upload.wikimedia.org/wikipedia/commons/1/1b/The_Coleoptera_of_the_British_islands_%28Plate_125%29_%288592917784%29.jpg",
# By Américo Toledano, Source: https://commons.wikimedia.org/wiki/File:Biblioteca_Maim%C3%B3nides,_Campus_Universitario_de_Rabanales_007.jpg
"https://upload.wikimedia.org/wikipedia/commons/thumb/0/0d/Biblioteca_Maim%C3%B3nides%2C_Campus_Universitario_de_Rabanales_007.jpg/1024px-Biblioteca_Maim%C3%B3nides%2C_Campus_Universitario_de_Rabanales_007.jpg",
# Source: https://commons.wikimedia.org/wiki/File:The_smaller_British_birds_(8053836633).jpg
"https://upload.wikimedia.org/wikipedia/commons/0/09/The_smaller_British_birds_%288053836633%29.jpg",
]
def detect_img(image_url):
start_time = time.time()
image_path = download_and_resize_image(image_url, 640, 480)
run_detector(detector, image_path)
end_time = time.time()
print("Inference time:",end_time-start_time)
detect_img(image_urls[0])
detect_img(image_urls[1])
detect_img(image_urls[2])
```
# Shot Angles
We want to find the angle at which players approach the ball, so we can work out what angle they are shooting at and whether or not they are trying to bounce the ball.
```
# Add this so we can import our haxml code from outside the notebooks folder.
import sys
sys.path.append("../")
from haxml.prediction import (
generate_rows_demo
)
from haxml.utils import (
load_match,
inflate_match,
get_stadiums,
get_matches_metadata,
to_clock,
get_opposing_goalpost,
stadium_distance,
angle_from_goal,
is_scored_goal
)
from haxml.viz import (
plot_stadium,
zoom_stadium,
plot_positions
)
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import json
import pandas as pd
stadiums = get_stadiums("../data/stadiums.json")
metadata = get_matches_metadata("../data/matches_metadata.csv")
meta_df = pd.DataFrame(metadata)
meta_df["scored_goals_total"] = meta_df["scored_goals_red"] + meta_df["scored_goals_blue"]
meta_df.sort_values(by="scored_goals_total", ascending=False).head()
def generate_rows_angles(match, stadium):
"""
Generates target and features for each kick in the match.
Produces two features for demo classifiers:
goal_distance: Distance from where ball was kicked to goal midpoint.
goal_angle: Angle (in radians) between straight shot from where ball was
kicked to goal midpoint.
Args:
match: Inflated match data (dict).
stadium: Stadium data (dict).
Returns:
Generator of dicts with values for each kick in the given match.
Includes prediction target "ag" (actual goals) which is 1 for a scored
goal (goal or error) and 0 otherwise, "index" which is the index of the
kick in the match kick list, and all the other features needed for
prediction and explanation.
"""
for i, kick in enumerate(match["kicks"]):
gp = get_opposing_goalpost(stadium, kick["fromTeam"])
x = kick["fromX"]
y = kick["fromY"]
gx = gp["mid"]["x"]
gy = gp["mid"]["y"]
dist = stadium_distance(x, y, gx, gy)
angle = angle_from_goal(x, y, gx, gy)
row = {
"ag": 1 if is_scored_goal(kick) else 0,
"index": i,
"time": kick["time"],
"x": x,
"y": y,
"from_name": kick["fromName"],
"from_team": kick["fromTeam"],
"goal_x": gx,
"goal_y": gy,
"goal_distance": dist,
"goal_angle": angle,
"team": kick["fromTeam"],
"stadium": match["stadium"]
}
yield row
# 1800 (weird)
# 1097: 2 work and one doesn't
# Going 2 indices down works much better with match 222
# Match 420 has the first goal a bit off and the other 3 accurate (first goal's least dist is 105?)
meta = metadata[1097]
meta
key = meta["match_id"]
infile = "../data/packed_matches/{}.json".format(key)
stadium = stadiums[meta["stadium"]]
match = load_match(infile)
row_gen = generate_rows_angles(match, stadium)
df = pd.DataFrame(row_gen)
df["match"] = key
df.head()
pd.DataFrame(match["positions"]).head(20)
example = df.query("ag == 1").to_dict(orient="records")[0]
example
help(sorted)
def get_positions_at_time(positions, t):
"""
Return a list of positions (dicts) closest to, but before time t.
"""
# Assume positions list is already sorted.
# frame is a list of positions (dicts) that have the same timestamp.
frame = []
frame_time = 0.0
for pos in positions:
# If we passed the target time t, return the frame we have
if pos["time"] > t:
break
# If this positions is part of the current frame, add it
if pos["time"] == frame_time:
frame.append(pos)
# If the current frame is over, make a new frame and add this position to it
else:
frame = []
frame.append(pos)
frame_time = pos["time"]
return frame
def get_positions_in_range(positions, start, end):
"""
Return a list of positions (dicts) between start and end (inclusive).
"""
assert start <= end, "Time `start` must be before `end`."
def is_in_time_range(pos):
return pos["time"] >= start and pos["time"] <= end
return list(filter(is_in_time_range, positions))
offset = 2 # seconds
print("Get positions at {}.".format(example["time"] - offset))
r1 = get_positions_at_time(match["positions"], example["time"])
pd.DataFrame(r1)
print("Get positions between {} and {}.".format(example["time"] - offset, example["time"]))
r2 = get_positions_in_range(match["positions"], example["time"] - offset, example["time"])
pd.DataFrame(r2)
help(plot_positions)
fig, ax = plot_positions(r1, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
for pos in r1:
ax.text(pos["x"], pos["y"], pos["name"])
fig
fig, ax = plot_positions(r2, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
# Most recent position for each player
# Key: Name, Value: position (dict)
last_pos = {}
for pos in r2:
last_pos[pos["name"]] = pos
for pos in last_pos.values():
ax.text(pos["x"], pos["y"], pos["name"])
fig
df.query("ag == 1")
bounce = df.query("ag == 1").to_dict(orient="records")[1]
bounce
end_time = bounce["time"] - 0.1
start_time = end_time - 4
print(f"Start: {to_clock(start_time)}")
print(f"End : {to_clock(end_time)}")
time_range = get_positions_in_range(match["positions"], start_time, end_time)
fig, ax = plot_positions(time_range, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
# Most recent position for each player
# Key: Name, Value: position (dict)
last_pos = {}
for pos in time_range:
last_pos[pos["name"]] = pos
for pos in last_pos.values():
ax.text(pos["x"], pos["y"], pos["name"])
fig
```
Note: Vinesh needs to fix the plotting methods, because they are mirror-images vertically, but this shouldn't be a big deal for now.
Notes for computing player/ball angle:
- Players can pivot really quickly, so the angle is very sensitive, small changes can have big effects
- We want to get the angle between player and ball in the last frame before they kick the ball
- We want to compare the angle between the player and the ball to the goal
- If we draw a ray from the position of the player through the position of the ball, that tells us the shot course
- If the shot course is going into the goal, it's a direct shot
- If the shot course is going away from the goal, they may be trying to bounce it in
- Where does the shot course intersect with the goal line?
- The goal line is a vertical line at the X-coordinate of the goal
- If the intersection is between the posts, it's shooting at the goal, otherwise, away from the goal
- If the intersection point is outside the field, they are bouncing off the sidelines
- We don't know where the sideline is exactly
- You can pick a threshold distance: if the intersection is farther than that from the posts, treat it as a sideline bounce
- Keep in mind, stadiums have different sizes! Try to use relative sizes
- `get_positions_in_range` returns a flat list of dicts
- But you could write one that returns a list of lists of dicts, where each sub-list is a single frame (see the sketch below)
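A possible helper for that last suggestion (the function name is ours, not part of the codebase): group the flat, time-sorted position list into frames, one sub-list per timestamp.
```
def group_positions_by_frame(positions):
    """
    Group a time-sorted flat list of position dicts into frames.
    Returns a list of lists, one sub-list per distinct timestamp.
    """
    frames = []
    current_time = None
    for pos in positions:
        if pos["time"] != current_time:
            frames.append([])
            current_time = pos["time"]
        frames[-1].append(pos)
    return frames
```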
Pseudocode for computing player/ball angle:
- Find the last frame before the player kicks the ball
- Pick a time range, get the positions
- For each frame (positions grouped by time), calculate distance between kicker and ball
- Search backwards, from the last frame, and once the distance stops decreasing, pick that frame
- Compute the shot course and intersection
- Use the shot course and intersection to make some features! (a sketch of the intersection step follows below)
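A minimal sketch of the intersection step described above, assuming we have already picked the frame and have the kicker position `(px, py)`, the ball position `(bx, by)`, and the goal line X-coordinate `gx` (all of these names are illustrative):
```
def shot_course_intersection(px, py, bx, by, gx):
    """
    Project the ray from the player through the ball onto the goal line x = gx.
    Returns the y-coordinate where the shot course crosses the goal line,
    or None if the ray points away from the goal line.
    """
    dx = bx - px
    dy = by - py
    if dx == 0:
        # Vertical shot course: it only meets the goal line if it is already on it.
        return by if bx == gx else None
    t = (gx - px) / dx
    if t < 0:
        # The goal line is behind the player relative to the ball: shooting away from goal.
        return None
    return py + t * dy
```
Comparing the returned y against the post coordinates from `get_opposing_goalpost(stadium, team)["posts"]` then tells us whether the course is between the posts, near a post, or far enough outside to suggest a sideline bounce.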
```
# We can get the X-coordinate of the goal like this:
# If your team is red, then you should be shooting at the blue goal
gp_blue = get_opposing_goalpost(stadium, "red")
# We can just use the coordinates of the midpoint:
print(gp_blue["mid"])
# After we get an intersection point, we want to know if it is between the posts
print(gp_blue["posts"])
# We can get the bounds of the field:
print(stadium["bounds"])
""" my brainstorming lol"""
```
Planning on making these functions:
1) player_ball_angle -- gets the angle of the shot (by finding the angle from the player to the ball)
2) shot_intersection -- gets where the shot is going to intersect (takes in the shot course and the type of field)
3) shot_to_goal -- gives back a correlation number between 0 and 1 (1 meaning the shot angle is exactly the ball-to-goal angle, 0 meaning the opposite direction); the higher the number, the more xG awarded (a sketch follows after this list)
4) post_award -- if the shot_intersection is on the post, award 0.5
Pre-existing functions to utilize:
- Getting the angle between player and ball: `get_positions_in_range()`, `get_positions_at_time()`
- Computing the shot intersection: `player_ball_angle`, the stadium data (bounds, mid, and size), `get_opposing_goalpost()`
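A possible sketch of the planned `shot_to_goal` score, under the assumption that it compares the player-to-ball direction against the player-to-goal direction and maps the angle between them linearly onto [0, 1] (the exact definition is still open above):
```
import math

def shot_to_goal(px, py, bx, by, gx, gy):
    """
    1.0 when the shot course points straight at the goal midpoint,
    0.0 when it points directly away, scaling linearly in between.
    """
    shot_angle = math.atan2(by - py, bx - px)   # direction player -> ball
    goal_angle = math.atan2(gy - py, gx - px)   # direction player -> goal midpoint
    diff = abs(shot_angle - goal_angle)
    if diff > math.pi:
        diff = 2 * math.pi - diff               # wrap the difference into [0, pi]
    return 1.0 - diff / math.pi
```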
```
import math

def player_ball_angle(x, y, bx, by):
'''Finds the expected angle of the shot (Using midpoints)
Args:
x: x position of player
y: y position of player
bx: x position ball
by: y position of ball
Returns:
Int that represents the angle of the shot (which is the angle of the player to the ball)
'''
    # absolute x and y distance between the player and the ball
    dx = float(abs(x - bx))
    dy = float(abs(y - by))
# Avoid division by zero error.
angle = math.atan(dx / dy) if dy > 0 else math.pi / 2.0
return angle
def shot_intersection(match,kick, stadium, offset):
'''Finds where the ball would intersect
Args:
match: Which match it is
kick: Which kick we want to measure
        stadium: What stadium it is (so we know where the goals and bounds are)
        offset: How many seconds before the kick to start looking for frames
    Returns:
        Tuple of (player_position, ball_position, intersect), where intersect is
        the point where the shot course crosses the goal line
'''
#Getting last frame before the kick
print("Calculate intersection at time: {}".format(kick["time"] - offset))
#frame = get_positions_at_time(match["positions"], kick["time"] - offset)
#Using in range and tracing back to see what frame was right before it left the foot
frame = get_positions_in_range(match["positions"], kick["time"] - offset,kick["time"])
#A list of lists with only info about player we want and ball
shooter_frames = []
ball_frames = []
print(kick['from_name'])
for i in frame:
if i['name'] == kick['from_name']:
shooter_frames.append(i)
elif i['type'] == 'ball':
ball_frames.append(i)
print((shooter_frames))
print((ball_frames))
#print(shooter_frames)
#print(ball_frames)
#picking frame with least dist
least_dist = float(math.inf)
btwn_dist = {'upper': 1, 'lower':1}
player_position = {}
ball_position = {}
length = min(len(shooter_frames),len(ball_frames))
    set_dist = 30  # if the ball and player get within 30 units, assume the ball was kicked
for i in range(length-1,-1,-1): #frame len of ball and shooter should be the same
dist = stadium_distance(shooter_frames[i]['x'],shooter_frames[i]['y'],ball_frames[i]['x'],ball_frames[i]['y'])
print(dist)
if dist <= set_dist:
player_position = shooter_frames[i]
ball_position = ball_frames[i]
break
if not bool(ball_position) or not bool(player_position): #The dictionaries were not populated yet so default to getting least distance
print("using least diff")
for i in range(length-1,-1,-1): #frame len of ball and shooter should be the same
dist = stadium_distance(shooter_frames[i]['x'],shooter_frames[i]['y'],ball_frames[i]['x'],ball_frames[i]['y'])
print(dist)
if dist <= least_dist:
player_position = shooter_frames[i]
ball_position = ball_frames[i]
least_dist = dist
else: #stopped decreasing
print(shooter_frames[i]['time'])
print("breaking")
break
#print("least distance is" + str(least_dist))
last = len(shooter_frames)-1
print("last frame is "+ str(stadium_distance(shooter_frames[0]['x'],shooter_frames[0]['y'],ball_frames[0]['x'],ball_frames[0]['y'])))
#print(frame)
print(player_position)
print(ball_position)
#Getting goal positions
goal_mid = get_opposing_goalpost(stadium, kick['team'])['mid']
#print(goal_mid)
#Extend line from shot angle (can't extend lines easily)
y_val = point_slope(
player_position,
slope(player_position['x'], player_position['y'], ball_position['x'], ball_position['y']),
goal_mid['x']
)
#Checking if the projection between the posts
intersect = { 'x': goal_mid['x'], 'y': y_val }
return player_position, ball_position, intersect
def point_slope(p, slope,x_goal):
#y - y1 = m(x-x1) --> y=mx-mx1+y1 (returning the y)
y_val = (slope*x_goal)-(slope*p['x'])+p['y']
return y_val
def slope(x1, y1, x2, y2):
m = (y2-y1)/(x2-x1)
return m
example = df.query("ag == 1").to_dict(orient="records")[0]
example
end_time = example["time"]
start_time = end_time - 2
print(f"Start: {start_time}")
print(f"End : {end_time}")
time_range = get_positions_in_range(match["positions"], start_time, end_time)
player_pos, ball_pos, intersect = shot_intersection(match,example,stadium, offset=1)
print(intersect)
fig, ax = plot_positions(time_range, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
on_goal = shot_on_goal(match, example, intersect, stadium)
print("on_goal feature " + str(on_goal))
# Most recent position for each player
# Key: Name, Value: position (dict)
last_pos = {}
for pos in time_range:
last_pos[pos["name"]] = pos
for pos in last_pos.values():
ax.text(pos["x"], pos["y"], pos["name"])
ax.plot(
[player_pos['x'], ball_pos['x'], intersect['x']],
[player_pos['y'], ball_pos['y'], intersect['y']],
color="green", linewidth=1
)
fig
import math
def dist_formula(x1,y1,x2,y2):
dist = math.sqrt( ((x1-x2)**2)+((y1-y2)**2) )
return dist
def speed_player(match,kick,player_name, offset):
    '''Speed of the player
Args:
match: Which match it is
kick: Which kick we want to measure
player_name: What player do we want to measure the speed for
Returns:
Int that represents the speed of the player
'''
#Getting time range to be able to measure distance
#start = get_positions_at_time(match["positions"], kick["time"] - offset)
#end = get_positions_at_time(match["positions"], kick["time"])
#getting positions
positions = get_positions_in_range(match["positions"], kick["time"] - offset,kick["time"])
#print(positions)
player_pos = []
#A list of lists with only info about player we want
for i in positions:
if i['name'] == player_name: #5th column in df is the id field
player_pos.append(i)
#print(player_pos)
#Getting the time
if len(player_pos) > 0:
last = len(player_pos)-1#getting last index)
time = player_pos[last]['time'] - player_pos[0]['time']
#Getting the distance
distance = stadium_distance(player_pos[0]['x'],player_pos[0]['y'],player_pos[last]['x'],player_pos[last]['y'])
#dist_formula(player_pos[0]['x'],player_pos[0]['y'],player_pos[last]['x'],player_pos[last]['y'])
print("dist:" + str(distance))
print("time:" + str(time))
#Returns speed
#NEED TO CHANGE TIME INTO SECONDS SO THAT IT IS CONSTANT AND NOT DIVIDING BY DIFF VALS
return distance/time
else:
return 0
name = 'Player 52'
print(speed_player(match,example, name, offset=9))
def defender_feature_weighted(match,kick,stadium,dist):
'''Figuring out the closest defender and num of defenders for a kick
Note: This is weighted so that defenders that are close to the player/ball or the goal count as 1.5 rather than 1
Args:
match: Which match it is
kick: Which kick we want to measure
dist: Set distance to consider a player pressuring
Returns:
List that contains the distance of the closest defender and the number of defenders (weighted)
'''
positions = get_positions_at_time(match["positions"], kick["time"])
closest_defender = float('inf')
defenders_pressuring = 0
ret = [0,0]
for person in positions:
        if person['team'] != kick['fromTeam'] and person['type'] == "player":
defender_dist = stadium_distance(kick['fromX'],kick['fromY'],person['x'],person['y'])
#((kick['fromX'] - person['x'])**2 + (kick['fromY'] - person['y'])**2)**(1/2)
if defender_dist < closest_defender:
closest_defender = defender_dist
ret[0] = closest_defender
if defender_dist <= dist:
#Checking distances for weights
#Not sure what to call "close" to player or goal (currently doing 5)
                post = get_opposing_goalpost(stadium, kick['fromTeam'])
                goal_dist = stadium_distance(post['mid']['x'], post['mid']['y'], person['x'], person['y'])
if defender_dist <= 5:
defenders_pressuring += 1.5
elif goal_dist <= 5:
defenders_pressuring += 1.5
else:
defenders_pressuring += 1
ret[1] = defenders_pressuring
return ret
#Testing!!
```
# Features
Some ideas:
- Boolean (1 or 0) depending on whether the shot should intersect the goal with a clear shot
- Is a player in the way: go through all player positions and check whether any of them touches the intersection line (by checking for points on the line, or within range based on the radius of the player); return 1 or 0 if a player is in the way of the shot
- Using the speed of the player and the speed of the ball, check if the ball can go the distance to reach the goal (a rough sketch follows below)
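As a rough sketch of that last idea (the function name, the `horizon` parameter, and using only the last two ball frames are assumptions, not part of the notebook yet): estimate the ball's speed from its last two observed positions and check whether, at that speed, it could cover the distance to the intersection point.
```
import math

def ball_can_reach(ball_frames, intersect, horizon=2.0):
    """
    Rough reachability feature: can the ball, at its speed over the last two
    observed frames, cover the distance to the intersection point within
    `horizon` seconds? Returns 1 or 0.
    """
    if len(ball_frames) < 2:
        return 0
    prev, last = ball_frames[-2], ball_frames[-1]
    dt = last["time"] - prev["time"]
    if dt <= 0:
        return 0
    speed = math.hypot(last["x"] - prev["x"], last["y"] - prev["y"]) / dt
    remaining = math.hypot(intersect["x"] - last["x"], intersect["y"] - last["y"])
    return 1 if speed * horizon >= remaining else 0
```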
```
def shot_on_goal(match, kick, intersect, stadium):
'''Figuring out if shot is going into the goal or not
Args:
match: Which match it is
kick: Which kick we want to measure
intersect: x,y of the shot intersection
stadium: Which staduim was it played on
Returns:
1 if shot is on goal, .5 if it hits the post, and 0 if it isn't on goal
'''
goal_posts = get_opposing_goalpost(stadium, kick['team'])['posts']
if intersect['y'] > goal_posts[0]['y'] and intersect['y'] < goal_posts[1]['y']:
return 1
elif intersect['y'] == goal_posts[0]['y'] or intersect['y'] == goal_posts[1]['y']:
#hits posts
return .5
else:
return 0
print(shot_on_goal(match, example, intersect, stadium))
def blocking_players(match, kick, intersect, ball_pos, stadium):
'''Find if there is a player blocking the shot
Args:
match: Which match it is
kick: Which kick we want to measure
intersect: x,y of the shot intersection
ball_pos: Point for the balls position
        stadium: Which stadium it was played on
    Returns:
        Not implemented yet; intended to return 1 if a player blocks the shot course and 0 otherwise
'''
#How will we account for raduis? --> one case is if the center of a player is on a point on the line but what if it isnt the center
#Get position of all players at the frame we get the intersection from and then check all of their positions -- how will we know the frame
#line - ax + by = c --> how to get the a,b,c constants
#point-slope form to linear equation
#y - y1 = m(x-x1) --> ax + by = c
#Tricky -- a can't be a fraction
def check_collision(a, b, c, x, y, radius):
'''Checking if shot line intersection is colliding with a player
Args:
a,b,c = constants
x: x position of the center of the player
y: y position of the center of the player
            radius: Radius of the player
        Returns:
            Nothing; prints whether the player circle collides with the line ax + by + c = 0
'''
# Find dist of line from center of circle
dist = ((abs(a * x + b * y + c)) /
math.sqrt(a * a + b * b))
# Checking if the distance is less
# than, greater than or equal to radius.
if radius == dist or radius > dist:
print("colliding")
else:
print("Not colliding")
import numpy as np
import matplotlib.pyplot as plt
# Define the known points
x = [100, 400]
y = [240, 265]
# Calculate the coefficients. This line answers the initial question.
coefficients = np.polyfit(x, y, 1)
# Print the findings
print ('a =', coefficients[0])
print ('b =', coefficients[1])
# Let's compute the values of the line...
polynomial = np.poly1d(coefficients)
x_axis = np.linspace(0,500,100)
y_axis = polynomial(x_axis)
# ...and plot the points and the line
plt.plot(x_axis, y_axis)
plt.plot( x[0], y[0], 'go' )
plt.plot( x[1], y[1], 'go' )
plt.grid('on')
plt.show()
```
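The open question in `blocking_players` above (how to get the a, b, c constants for the point-to-line distance) can be answered directly from two points on the shot course, with no need for `np.polyfit`; the distance formula also works with any real-valued coefficients, not just integers. A minimal, self-contained sketch (the function names are ours):
```
import math

def line_coefficients(x1, y1, x2, y2):
    """
    Return (a, b, c) such that a*x + b*y + c = 0 passes through both points.
    Unlike slope/intercept form, this also handles vertical lines.
    """
    a = y2 - y1
    b = x1 - x2
    c = -(a * x1 + b * y1)
    return a, b, c

def blocks_shot(a, b, c, px, py, radius):
    """True if a player of the given radius centered at (px, py) touches the line."""
    dist = abs(a * px + b * py + c) / math.sqrt(a * a + b * b)
    return dist <= radius

# Example: shot course through (100, 240) and (400, 265), player at (250, 255).
a, b, c = line_coefficients(100, 240, 400, 265)
print(blocks_shot(a, b, c, 250, 255, radius=15))  # True
```
Note that this checks the infinite line; a full blocking feature would also need to confirm the player sits between the ball and the goal line segment.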
|
github_jupyter
|
# Add this so we can import our haxml code from outside the notebooks folder.
import sys
sys.path.append("../")
from haxml.prediction import (
generate_rows_demo
)
from haxml.utils import (
load_match,
inflate_match,
get_stadiums,
get_matches_metadata,
to_clock,
get_opposing_goalpost,
stadium_distance,
angle_from_goal,
is_scored_goal
)
from haxml.viz import (
plot_stadium,
zoom_stadium,
plot_positions
)
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import json
import pandas as pd
stadiums = get_stadiums("../data/stadiums.json")
metadata = get_matches_metadata("../data/matches_metadata.csv")
meta_df = pd.DataFrame(metadata)
meta_df["scored_goals_total"] = meta_df["scored_goals_red"] + meta_df["scored_goals_blue"]
meta_df.sort_values(by="scored_goals_total", ascending=False).head()
def generate_rows_angles(match, stadium):
"""
Generates target and features for each kick in the match.
Produces two features for demo classifiers:
goal_distance: Distance from where ball was kicked to goal midpoint.
goal_angle: Angle (in radians) between straight shot from where ball was
kicked to goal midpoint.
Args:
match: Inflated match data (dict).
stadium: Stadium data (dict).
Returns:
Generator of dicts with values for each kick in the given match.
Includes prediction target "ag" (actual goals) which is 1 for a scored
goal (goal or error) and 0 otherwise, "index" which is the index of the
kick in the match kick list, and all the other features needed for
prediction and explanation.
"""
for i, kick in enumerate(match["kicks"]):
gp = get_opposing_goalpost(stadium, kick["fromTeam"])
x = kick["fromX"]
y = kick["fromY"]
gx = gp["mid"]["x"]
gy = gp["mid"]["y"]
dist = stadium_distance(x, y, gx, gy)
angle = angle_from_goal(x, y, gx, gy)
row = {
"ag": 1 if is_scored_goal(kick) else 0,
"index": i,
"time": kick["time"],
"x": x,
"y": y,
"from_name": kick["fromName"],
"from_team": kick["fromTeam"],
"goal_x": gx,
"goal_y": gy,
"goal_distance": dist,
"goal_angle": angle,
"team": kick["fromTeam"],
"stadium": match["stadium"]
}
yield row
#1800 (weirddd)
#1097 2 work and one doesnt
#Going 2 indeces down works so much detter with this match 222
#Match 420 has the first goal as a bit off and the other 3 as accurate (first goals least dist is 105?)
meta = metadata[1097]
meta
key = meta["match_id"]
infile = "../data/packed_matches/{}.json".format(key)
stadium = stadiums[meta["stadium"]]
match = load_match(infile)
row_gen = generate_rows_angles(match, stadium)
df = pd.DataFrame(row_gen)
df["match"] = key
df.head()
pd.DataFrame(match["positions"]).head(20)
example = df.query("ag == 1").to_dict(orient="records")[0]
example
help(sorted)
def get_positions_at_time(positions, t):
"""
Return a list of positions (dicts) closest to, but before time t.
"""
# Assume positions list is already sorted.
# frame is a list of positions (dicts) that have the same timestamp.
frame = []
frame_time = 0.0
for pos in positions:
# If we passed the target time t, return the frame we have
if pos["time"] > t:
break
# If this positions is part of the current frame, add it
if pos["time"] == frame_time:
frame.append(pos)
# If the current frame is over, make a new frame and add this position to it
else:
frame = []
frame.append(pos)
frame_time = pos["time"]
return frame
def get_positions_in_range(positions, start, end):
"""
Return a list of positions (dicts) between start and end (inclusive).
"""
assert start <= end, "Time `start` must be before `end`."
def is_in_time_range(pos):
return pos["time"] >= start and pos["time"] <= end
return list(filter(is_in_time_range, positions))
offset = 2 # seconds
print("Get positions at {}.".format(example["time"] - offset))
r1 = get_positions_at_time(match["positions"], example["time"])
pd.DataFrame(r1)
print("Get positions between {} and {}.".format(example["time"] - offset, example["time"]))
r2 = get_positions_in_range(match["positions"], example["time"] - offset, example["time"])
pd.DataFrame(r2)
help(plot_positions)
fig, ax = plot_positions(r1, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
for pos in r1:
ax.text(pos["x"], pos["y"], pos["name"])
fig
fig, ax = plot_positions(r2, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
# Most recent position for each player
# Key: Name, Value: position (dict)
last_pos = {}
for pos in r2:
last_pos[pos["name"]] = pos
for pos in last_pos.values():
ax.text(pos["x"], pos["y"], pos["name"])
fig
df.query("ag == 1")
bounce = df.query("ag == 1").to_dict(orient="records")[1]
bounce
end_time = bounce["time"] - 0.1
start_time = end_time - 4
print(f"Start: {to_clock(start_time)}")
print(f"End : {to_clock(end_time)}")
time_range = get_positions_in_range(match["positions"], start_time, end_time)
fig, ax = plot_positions(time_range, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
# Most recent position for each player
# Key: Name, Value: position (dict)
last_pos = {}
for pos in time_range:
last_pos[pos["name"]] = pos
for pos in last_pos.values():
ax.text(pos["x"], pos["y"], pos["name"])
fig
# We can get the X-coordinate of the goal like this:
# If your team is red, then you should be shooting at the blue goal
gp_blue = get_opposing_goalpost(stadium, "red")
# We can just use the coordinates of the midpoint:
print(gp_blue["mid"])
# After we get an intersection point, we want to know if it is between the posts
print(gp_blue["posts"])
# We can get the bounds of the field:
print(stadium["bounds"])
""" my brainstorming lol"""
def player_ball_angle(x,y,bx,by):
'''Finds the expected angle of the shot (Using midpoints)
Args:
x: x position of player
y: y position of player
bx: x position ball
by: y position of ball
Returns:
Int that represents the angle of the shot (which is the angle of the player to the ball)
'''
# abs of x distance and y distnace between ball and player
dx = float(abs(x - gx))
dy = float(abs(y - gy))
# Avoid division by zero error.
angle = math.atan(dx / dy) if dy > 0 else math.pi / 2.0
return angle
def shot_intersection(match,kick, stadium, offset):
'''Finds where the ball would intersect
Args:
match: Which match it is
kick: Which kick we want to measure
staduim: What stadium size it is (so we know where the goals and bounds are)
Returns:
Int 1 or 0 if the ball is going towars the goal or not
'''
#Getting last frame before the kick
print("Calculate intersection at time: {}".format(kick["time"] - offset))
#frame = get_positions_at_time(match["positions"], kick["time"] - offset)
#Using in range and tracing back to see what frame was right before it left the foot
frame = get_positions_in_range(match["positions"], kick["time"] - offset,kick["time"])
#A list of lists with only info about player we want and ball
shooter_frames = []
ball_frames = []
print(kick['from_name'])
for i in frame:
if i['name'] == kick['from_name']:
shooter_frames.append(i)
elif i['type'] == 'ball':
ball_frames.append(i)
print((shooter_frames))
print((ball_frames))
#print(shooter_frames)
#print(ball_frames)
#picking frame with least dist
least_dist = float(math.inf)
btwn_dist = {'upper': 1, 'lower':1}
player_position = {}
ball_position = {}
length = min(len(shooter_frames),len(ball_frames))
set_dist = 30 #ball and player are at least get within 30 units then we assume that it was kicked
for i in range(length-1,-1,-1): #frame len of ball and shooter should be the same
dist = stadium_distance(shooter_frames[i]['x'],shooter_frames[i]['y'],ball_frames[i]['x'],ball_frames[i]['y'])
print(dist)
if dist <= set_dist:
player_position = shooter_frames[i]
ball_position = ball_frames[i]
break
if not bool(ball_position) or not bool(player_position): #The dictionaries were not populated yet so default to getting least distance
print("using least diff")
for i in range(length-1,-1,-1): #frame len of ball and shooter should be the same
dist = stadium_distance(shooter_frames[i]['x'],shooter_frames[i]['y'],ball_frames[i]['x'],ball_frames[i]['y'])
print(dist)
if dist <= least_dist:
player_position = shooter_frames[i]
ball_position = ball_frames[i]
least_dist = dist
else: #stopped decreasing
print(shooter_frames[i]['time'])
print("breaking")
break
#print("least distance is" + str(least_dist))
last = len(shooter_frames)-1
print("last frame is "+ str(stadium_distance(shooter_frames[0]['x'],shooter_frames[0]['y'],ball_frames[0]['x'],ball_frames[0]['y'])))
#print(frame)
print(player_position)
print(ball_position)
#Getting goal positions
goal_mid = get_opposing_goalpost(stadium, kick['team'])['mid']
#print(goal_mid)
#Extend line from shot angle (can't extend lines easily)
y_val = point_slope(
player_position,
slope(player_position['x'], player_position['y'], ball_position['x'], ball_position['y']),
goal_mid['x']
)
#Checking if the projection between the posts
intersect = { 'x': goal_mid['x'], 'y': y_val }
return player_position, ball_position, intersect
def point_slope(p, slope,x_goal):
#y - y1 = m(x-x1) --> y=mx-mx1+y1 (returning the y)
y_val = (slope*x_goal)-(slope*p['x'])+p['y']
return y_val
def slope(x1, y1, x2, y2):
m = (y2-y1)/(x2-x1)
return m
example = df.query("ag == 1").to_dict(orient="records")[0]
example
end_time = example["time"]
start_time = end_time - 2
print(f"Start: {start_time}")
print(f"End : {end_time}")
time_range = get_positions_in_range(match["positions"], start_time, end_time)
player_pos, ball_pos, intersect = shot_intersection(match,example,stadium, offset=1)
print(intersect)
fig, ax = plot_positions(time_range, stadium)
fig.set_size_inches(*zoom_stadium(stadium["bounds"]))
on_goal = shot_on_goal(match, example, intersect, stadium)
print("on_goal feature " + str(on_goal))
# Most recent position for each player
# Key: Name, Value: position (dict)
last_pos = {}
for pos in time_range:
last_pos[pos["name"]] = pos
for pos in last_pos.values():
ax.text(pos["x"], pos["y"], pos["name"])
ax.plot(
[player_pos['x'], ball_pos['x'], intersect['x']],
[player_pos['y'], ball_pos['y'], intersect['y']],
color="green", linewidth=1
)
fig
import math
def dist_formula(x1,y1,x2,y2):
dist = math.sqrt( ((x1-x2)**2)+((y1-y2)**2) )
return dist
def speed_player(match,kick,player_name, offset):
'''' Speed of the player
Args:
match: Which match it is
kick: Which kick we want to measure
player_name: What player do we want to measure the speed for
Returns:
Int that represents the speed of the player
'''
#Getting time range to be able to measure distance
#start = get_positions_at_time(match["positions"], kick["time"] - offset)
#end = get_positions_at_time(match["positions"], kick["time"])
#getting positions
positions = get_positions_in_range(match["positions"], kick["time"] - offset,kick["time"])
#print(positions)
player_pos = []
#A list of lists with only info about player we want
for i in positions:
if i['name'] == player_name: #5th column in df is the id field
player_pos.append(i)
#print(player_pos)
#Getting the time
if len(player_pos) > 0:
last = len(player_pos)-1#getting last index)
time = player_pos[last]['time'] - player_pos[0]['time']
#Getting the distance
distance = stadium_distance(player_pos[0]['x'],player_pos[0]['y'],player_pos[last]['x'],player_pos[last]['y'])
#dist_formula(player_pos[0]['x'],player_pos[0]['y'],player_pos[last]['x'],player_pos[last]['y'])
print("dist:" + str(distance))
print("time:" + str(time))
#Returns speed
#NEED TO CHANGE TIME INTO SECONDS SO THAT IT IS CONSTANT AND NOT DIVIDING BY DIFF VALS
return distance/time
else:
return 0
name = 'Player 52'
print(speed_player(match,example, name, offset=9))
def defender_feature_weighted(match,kick,stadium,dist):
'''Figuring out the closest defender and num of defenders for a kick
Note: This is weighted so that defenders that are close to the player/ball or the goal count as 1.5 rather than 1
Args:
match: Which match it is
kick: Which kick we want to measure
dist: Set distance to consider a player pressuring
Returns:
List that contains the distance of the closest defender and the number of defenders (weighted)
'''
positions = get_positions_at_time(match["positions"], kick["time"])
closest_defender = float('inf')
defenders_pressuring = 0
ret = [0,0]
for person in positions:
if person['team'] is not kick['fromTeam'] and person['type'] == "player":
defender_dist = stadium_distance(kick['fromX'],kick['fromY'],person['x'],person['y'])
#((kick['fromX'] - person['x'])**2 + (kick['fromY'] - person['y'])**2)**(1/2)
if defender_dist < closest_defender:
closest_defender = defender_dist
ret[0] = closest_defender
if defender_dist <= dist:
#Checking distances for weights
#Not sure what to call "close" to player or goal (currently doing 5)
post = get_opposing_goalpost(stadium, "red")
goal_dist = stadium_distance(post['mid'][0], post['mid'][1] ,person['x'],person['y'])
if defender_dist <= 5:
defenders_pressuring += 1.5
elif goal_dist <= 5:
defenders_pressuring += 1.5
else:
defenders_pressuring += 1
ret[1] = defenders_pressuring
return ret
#Testing!!
def shot_on_goal(match, kick, intersect, stadium):
'''Figuring out if shot is going into the goal or not
Args:
match: Which match it is
kick: Which kick we want to measure
intersect: x,y of the shot intersection
        stadium: Which stadium it was played on
Returns:
1 if shot is on goal, .5 if it hits the post, and 0 if it isn't on goal
'''
goal_posts = get_opposing_goalpost(stadium, kick['team'])['posts']
if intersect['y'] > goal_posts[0]['y'] and intersect['y'] < goal_posts[1]['y']:
return 1
elif intersect['y'] == goal_posts[0]['y'] or intersect['y'] == goal_posts[1]['y']:
#hits posts
return .5
else:
return 0
print(shot_on_goal(match, example, intersect, stadium))
def blocking_players(match, kick, intersect, ball_pos, stadium):
'''Find if there is a player blocking the shot
Args:
match: Which match it is
kick: Which kick we want to measure
intersect: x,y of the shot intersection
ball_pos: Point for the balls position
        stadium: Which stadium it was played on
    Returns:
        True if a defending player blocks the shot line, otherwise False
'''
#How will we account for raduis? --> one case is if the center of a player is on a point on the line but what if it isnt the center
#Get position of all players at the frame we get the intersection from and then check all of their positions -- how will we know the frame
#line - ax + by = c --> how to get the a,b,c constants
#point-slope form to linear equation
#y - y1 = m(x-x1) --> ax + by = c
#Tricky -- a can't be a fraction
def check_collision(a, b, c, x, y, radius):
'''Checking if shot line intersection is colliding with a player
Args:
a,b,c = constants
x: x position of the center of the player
y: y position of the center of the player
        radius: Radius of the player
    Returns:
        prints "colliding" if the player's circle touches the shot line, "Not colliding" otherwise
'''
# Find dist of line from center of circle
dist = ((abs(a * x + b * y + c)) /
math.sqrt(a * a + b * b))
# Checking if the distance is less
# than, greater than or equal to radius.
if radius == dist or radius > dist:
print("colliding")
else:
print("Not colliding")
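# Added sketch (an assumption, not original code): the comments above ask how to
# get the a, b, c constants for the shot line in the form a*x + b*y + c = 0 that
# check_collision expects. Given two points on the line (e.g. the ball position
# and the goal-line intersection), one way is:
def line_coefficients(x1, y1, x2, y2):
    # line through (x1, y1) and (x2, y2), written as a*x + b*y + c = 0
    a = y2 - y1
    b = x1 - x2
    c = (x2 - x1) * y1 - (y2 - y1) * x1
    return a, b, c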
import numpy as np
import matplotlib.pyplot as plt
# Define the known points
x = [100, 400]
y = [240, 265]
# Calculate the coefficients. This line answers the initial question.
coefficients = np.polyfit(x, y, 1)
# Print the findings
print ('a =', coefficients[0])
print ('b =', coefficients[1])
# Let's compute the values of the line...
polynomial = np.poly1d(coefficients)
x_axis = np.linspace(0,500,100)
y_axis = polynomial(x_axis)
# ...and plot the points and the line
plt.plot(x_axis, y_axis)
plt.plot( x[0], y[0], 'go' )
plt.plot( x[1], y[1], 'go' )
plt.grid('on')
plt.show()
| 0.700383 | 0.811751 |
# Double 7's (Short Term Trading Strategies that Work)
1. The SPY is above its 200-day moving average.
2. If the SPY closes at a 7-day low, buy.
3. If the SPY closes at a 7-day high, sell your long position (a minimal sketch of these rules follows).
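The rules translate directly into rolling-window comparisons. Below is a minimal illustrative sketch, not the `strategy` module used in this notebook; the `prices` DataFrame and its `close` column are assumed stand-ins for the price data that pinkfish loads.
```
import pandas as pd

def double_seven_signals(prices, period=7, sma_period=200):
    """Boolean buy/sell signals for the Double 7's rules (illustrative only)."""
    close = prices['close']
    sma = close.rolling(sma_period).mean()   # 200-day moving average (rule 1)
    low = close.rolling(period).min()        # 7-day low, including today
    high = close.rolling(period).max()       # 7-day high, including today
    buy = (close > sma) & (close <= low)     # regime filter + 7-day-low entry
    sell = close >= high                     # 7-day-high exit
    return buy, sell
```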
```
# use future imports for python 3.x forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# other imports
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from talib.abstract import *
# project imports
import pinkfish as pf
import strategy
# format price data
pd.options.display.float_format = '{:0.2f}'.format
%matplotlib inline
# set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
or %matplotlib inline
%matplotlib notebook: will lead to interactive plots embedded within
the notebook, you can zoom and resize the figure
%matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)
```
Some global data
```
#symbol = '^GSPC'
#symbol = 'SPY'
#symbol = 'DIA'
#symbol = 'QQQ'
#symbol = 'IWM'
#symbol = 'TLT'
#symbol = 'GLD'
#symbol = 'AAPL'
#symbol = 'BBRY'
#symbol = 'GDX'
symbol = 'OIH'
capital = 10000
start = datetime.datetime(2000, 1, 1)
end = datetime.datetime.now()
```
Define high low trade periods
```
period = 7
```
Run Strategy
```
s = strategy.Strategy(symbol, capital, start, end, period)
s.run()
```
Retrieve log DataFrames
```
s.tlog, s.dbal = s.get_logs()
s.stats = s.stats()
s.tlog.tail(100)
s.dbal.tail()
```
Generate strategy stats - display all available stats
```
pf.print_full(s.stats)
```
Equity curve
Run Benchmark, Retrieve benchmark logs, and Generate benchmark stats
```
benchmark = pf.Benchmark(symbol, capital, s._start, s._end)
benchmark.run()
benchmark.tlog, benchmark.dbal = benchmark.get_logs()
benchmark.stats = benchmark.stats()
```
Plot Equity Curves: Strategy vs Benchmark
```
pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)
```
Plot Trades
```
pf.plot_trades(s.dbal, benchmark=benchmark.dbal)
```
Bar Graph: Strategy vs Benchmark
```
metrics = ('annual_return_rate',
'max_closed_out_drawdown',
'drawdown_annualized_return',
'drawdown_recovery',
'best_month',
'worst_month',
'sharpe_ratio',
'sortino_ratio',
'monthly_std')
df = pf.plot_bar_graph(s.stats, benchmark.stats, *metrics)
df
returns = s.dbal['close']
returns.tail()
benchmark_returns = benchmark.dbal['close']
benchmark_returns.tail()
pf.prettier_graphs(returns, benchmark_returns, label1='Strategy', label2='Benchmark', points_to_plot=500)
```
|
github_jupyter
|
# use future imports for python 3.x forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# other imports
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from talib.abstract import *
# project imports
import pinkfish as pf
import strategy
# format price data
pd.options.display.float_format = '{:0.2f}'.format
%matplotlib inline
# set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
or %matplotlib inline
%matplotlib notebook: will lead to interactive plots embedded within
the notebook, you can zoom and resize the figure
%matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)
#symbol = '^GSPC'
#symbol = 'SPY'
#symbol = 'DIA'
#symbol = 'QQQ'
#symbol = 'IWM'
#symbol = 'TLT'
#symbol = 'GLD'
#symbol = 'AAPL'
#symbol = 'BBRY'
#symbol = 'GDX'
symbol = 'OIH'
capital = 10000
start = datetime.datetime(2000, 1, 1)
end = datetime.datetime.now()
period = 7
s = strategy.Strategy(symbol, capital, start, end, period)
s.run()
s.tlog, s.dbal = s.get_logs()
s.stats = s.stats()
s.tlog.tail(100)
s.dbal.tail()
pf.print_full(s.stats)
benchmark = pf.Benchmark(symbol, capital, s._start, s._end)
benchmark.run()
benchmark.tlog, benchmark.dbal = benchmark.get_logs()
benchmark.stats = benchmark.stats()
pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)
pf.plot_trades(s.dbal, benchmark=benchmark.dbal)
metrics = ('annual_return_rate',
'max_closed_out_drawdown',
'drawdown_annualized_return',
'drawdown_recovery',
'best_month',
'worst_month',
'sharpe_ratio',
'sortino_ratio',
'monthly_std')
df = pf.plot_bar_graph(s.stats, benchmark.stats, *metrics)
df
returns = s.dbal['close']
returns.tail()
benchmark_returns = benchmark.dbal['close']
benchmark_returns.tail()
pf.prettier_graphs(returns, benchmark_returns, label1='Strategy', label2='Benchmark', points_to_plot=500)
| 0.727975 | 0.792585 |
# Preprocessing the Dataset
```
# Python Standard Library
import random
import sys
import os
# External Modules
import numpy as np
import h5py
from datetime import datetime
from skimage.filters import threshold_otsu
from matplotlib import pyplot as plt
from preprocessing.datamodel import SlideManager
from preprocessing.processing import split_negative_slide, split_positive_slide, create_tumor_mask, rgb2gray
from preprocessing.util import TileMap
```
### Data
The whole data sets have the following sizes:
- CAMELYON16 *(~715 GiB)*
```
data
├── CAMELYON16
│ ├── training
│ │ ├── lesion_annotations
│ │ │ └── tumor_001.xml - tumor_110.xml
│ │ ├── normal
│ │ │ └── normal_001.tif - normal_160.tif
│ │ └── tumor
│ │ └── tumor_001.tif - tumor_110.tif
│ └── test
│ ├── lesion_annotations
│ │ └── test_001.xml - tumor_110.xml
│ └── images
│ └── test_001.tif - normal_160.tif
```
**Note:** The `SlideManager` class is also case-sensitive, especially when mapping annotations to tumor slides, so be consistent in file labeling.
## Dataset Generation
- Process all files from the CAMELYON16 data set
- No overlap for negative tiles
- Minimum of 20% tissue in tiles for normal slides
- Minimum of 60% tumorous tissue for positive slides
- Slide zoom level 0 (0-9, 0 being the highest zoom)
- Tile_size of 312x312
- 156 pixel overlap for tumorous (positive tiles) since they are scarce
- We save up to 1000 tiles per slide
- Processing in this notebook will take approximately ~60 hours [\*]
- Classifying the tiles of the WSIs of the test set will later take ~1 hour per WSI [\*]
Most importantly, we will save all tiles from all WSIs into a single HDF5 file. This is crucial because, when accessing the data later for training, most of the time is spent opening files. Additionally, training works better when a single batch (e.g. 100 tiles) is as heterogeneous as the original data. So when we want to read 100 random tiles, we ideally want to read 100 tiles from 100 different slides, and we do not want to open 100 different files to do so.
**Background Information:**
Depending on the staining process and the slide scanner, the slides can differ quite a lot in color. Therefore a batch containing 100 tiles from one slide only will most likely prevent the CNN from generalizing well.
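To make the benefit concrete, here is a rough sketch (an assumption, not code from the project) of how a heterogeneous batch could be drawn from the combined file created at the end of this notebook, which stores one `uint8` dataset per WSI with shape `[n_tiles, tile_size, tile_size, 3]`:
```
import h5py
import numpy as np

def random_batch(h5_path, batch_size=100, seed=None):
    """Draw a batch whose tiles come from many different slides."""
    rng = np.random.default_rng(seed)
    tiles = []
    with h5py.File(h5_path, 'r') as h5:
        keys = list(h5.keys())                         # one dataset per WSI
        for _ in range(batch_size):
            key = rng.choice(keys)                     # random slide
            idx = int(rng.integers(h5[key].shape[0]))  # random tile in that slide
            tiles.append(h5[key][idx])
    return np.stack(tiles)                             # (batch_size, tile, tile, 3)
```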
```
### EDIT THIS CELL:
### Assign the path to your CAMELYON16 data
CAM_BASE_DIR = '/media/nico/data/fourthbrain/project/'
### Do not edit this cell
CAM16_DIR = CAM_BASE_DIR + 'CAMELYON16/'
GENERATED_DATA = CAM_BASE_DIR + 'output/'
mgr = SlideManager(cam16_dir=CAM16_DIR)
n_slides= len(mgr.slides)
level = 0
tile_size = 312
### Execute this cell
poi = 0.20 ### 20% of negative tiles must contain tissue (in contrast to slide background)
poi_tumor = 0.60 ### 60% of pos tiles must contain metastases
### to not have too few positive tile, we use half overlapping tilesize
overlap_tumor = tile_size // 2
### we have enough normal tissue, so negative tiles will be less of a problem
overlap = 0.0
max_tiles_per_slide = 1000
```
**Hint:**
* As mentioned above, the next two blocks will take a lot of time, depending on the chosen options. Before preprocessing the full data set, it might help to process just a few slides, e.g. two normal and two tumor, to test whether everything works as expected.
* In some rare cases Jupyter Notebook can become unstable when running for hours. It might be a good idea to run the Python program from a shell instead. To do so, export the notebook as a Python program via `File --> Download as --> Python`
### Create Individual Files per WSI
To make this process resumable if anything fails, we will first create one HDF5 file for each WSI. This way, if anything fails (e.g. a power failure or the Python kernel dying), you can simply delete the very last file, which will most likely be corrupted, and resume the process by re-executing the cells.
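A small helper like the following (a sketch under the assumption that the file-name pattern below stays unchanged, not part of the original workflow) could additionally skip slides whose per-WSI file already exists, so a restarted run only processes the remaining slides:
```
import os

def already_processed(generated_dir, slide_name, tile_size, poi, poi_tumor, level):
    """True if the per-WSI HDF5 file for this slide already exists."""
    filename = '{}/{}_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(
        generated_dir, slide_name, tile_size, tile_size, poi, poi_tumor, level)
    return os.path.exists(filename)
```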
```
tiles_pos = 0
num_slides = 2 #len(mgr.annotated_slides)
for i in range(num_slides):
print("Working on {}".format(mgr.annotated_slides[i].name))
# try:
filename = '{}/{}_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(GENERATED_DATA, mgr.annotated_slides[i].name, tile_size, tile_size,
poi, poi_tumor, level)
# 'w-' creates file, fails if exists
h5 = h5py.File(filename, "w-", libver='latest')
# create a new and unconsumed tile iterator
tile_iter = split_positive_slide(mgr.annotated_slides[i], level=level,
tile_size=tile_size, overlap=overlap_tumor,
poi_threshold=poi_tumor)
tiles_batch = list()
for tile, bounds in tile_iter:
if len(tiles_batch) % 10 == 0: print('positive slide #:', i, 'tiles so far:', len(tiles_batch))
if len(tiles_batch) > max_tiles_per_slide: break
tiles_batch.append(tile)
# creating a date set in the file
dset = h5.create_dataset(mgr.annotated_slides[i].name,
(len(tiles_batch), tile_size, tile_size, 3),
dtype=np.uint8,
data=np.array(tiles_batch),
compression=0)
h5.close()
tiles_pos += len(tiles_batch)
print(datetime.now(), i, '/', num_slides, ' tiles ', len(tiles_batch))
print('pos tiles total: ', tiles_pos)
# except:
# print('slide nr {}/{} failed'.format(i, len(mgr.annotated_slides)))
# print(sys.exc_info()[0])
tiles_neg = 0
num_slides = 2 #len(mgr.annotated_slides)
for i in range(num_slides):
try:
filename = '{}/{}_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(GENERATED_DATA, mgr.negative_slides[i].name, tile_size, tile_size,
poi, poi_tumor, level)
# 'w-' creates file, fails if exists
h5 = h5py.File(filename, "w-", libver='latest')
# load the slide into numpy array
arr = np.asarray(mgr.negative_slides[i].get_full_slide(level=4))
# convert it to gray scale
arr_gray = rgb2gray(arr)
# calculate otsu threshold
threshold = threshold_otsu(arr_gray)
# create a new and unconsumed tile iterator
# because we have so many negative slides we do not use overlap
tile_iter = split_negative_slide(mgr.negative_slides[i], level=level,
otsu_threshold=threshold,
tile_size=tile_size, overlap=overlap,
poi_threshold=poi)
tiles_batch = []
for tile, bounds in tile_iter:
if len(tiles_batch) % 10 == 0: print('neg slide:', i, 'tiles so far:', len(tiles_batch))
if len(tiles_batch) > max_tiles_per_slide: break
tiles_batch.append(tile)
# creating a date set in the file
dset = h5.create_dataset(mgr.negative_slides[i].name,
(len(tiles_batch), tile_size, tile_size, 3),
dtype=np.uint8,
data=np.array(tiles_batch),
compression=0)
h5.close()
tiles_neg += len(tiles_batch)
print(datetime.now(), i, '/', num_slides, ' tiles ', len(tiles_batch))
print('neg tiles total: ', tiles_neg)
except:
print('slide nr {}/{} failed'.format(i, len(mgr.negative_slides)))
print(sys.exc_info()[0])
```
### Create Single File
Now we will create a new, final HDF5 file to contain all tiles of all the WSIs we just processed. The benefit is a further reduction in reading time: opening a file takes time, and this way we only ever need to open a single file.
```
single_file = '{}/all_wsis_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(GENERATED_DATA, tile_size, tile_size,
poi, poi_tumor, level)
h5_single = h5py.File(single_file, 'w')
for f in os.listdir(GENERATED_DATA):
if f.startswith('normal_') or f.startswith('tumor_'):
filename = GENERATED_DATA + f
with h5py.File(filename, 'r') as h5:
for key in h5.keys():
print('processing: "{}", shape: {}'.format(key, h5[key].shape))
if h5[key].shape[0] > 0: ### dont create dsets for WSIs with 0 tiles
dset = h5_single.create_dataset(key,
h5[key].shape,
dtype=np.uint8,
data=h5[key][:],
compression=0)
h5_single.close()
print('Done combining hdf5 files')
```
## Summary and Outlook
The next step is to train a neural network with the preprocessed data to be able to classify and predict unseen tiles.
If you are curious how the `preprocessing` library used here works and how to use OpenSlide, take a look at the source code; it should not be too hard to understand. Note (a short sketch of the Otsu step follows the list):
* For negative slides: we use Otsu thresholding to distinguish between slide background and tissue
* For positive slides: we simply use the XML files, which contain polygons for the metastatic regions
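As an illustration of the first bullet, a tissue mask based on Otsu's threshold can be built roughly as follows; this sketch uses `skimage.color.rgb2gray` rather than the project's own `rgb2gray` helper, so treat it as an assumption about the idea, not the library's exact implementation.
```
import numpy as np
from skimage.color import rgb2gray
from skimage.filters import threshold_otsu

def tissue_mask(rgb_image):
    """Boolean mask that is True where (darker) tissue is, False on background."""
    gray = rgb2gray(np.asarray(rgb_image))   # grayscale in [0, 1]
    threshold = threshold_otsu(gray)
    return gray < threshold                  # tissue is darker than background
```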
|
github_jupyter
|
# Python Standard Library
import random
import sys
import os
# External Modules
import numpy as np
import h5py
from datetime import datetime
from skimage.filters import threshold_otsu
from matplotlib import pyplot as plt
from preprocessing.datamodel import SlideManager
from preprocessing.processing import split_negative_slide, split_positive_slide, create_tumor_mask, rgb2gray
from preprocessing.util import TileMap
data
├── CAMELYON16
│ ├── training
│ │ ├── lesion_annotations
│ │ │ └── tumor_001.xml - tumor_110.xml
│ │ ├── normal
│ │ │ └── normal_001.tif - normal_160.tif
│ │ └── tumor
│ │ └── tumor_001.tif - tumor_110.tif
│ └── test
│ ├── lesion_annotations
│ │ └── test_001.xml - tumor_110.xml
│ └── images
│ └── test_001.tif - normal_160.tif
### EDIT THIS CELL:
### Assign the path to your CAMELYON16 data
CAM_BASE_DIR = '/media/nico/data/fourthbrain/project/'
### Do not edit this cell
CAM16_DIR = CAM_BASE_DIR + 'CAMELYON16/'
GENERATED_DATA = CAM_BASE_DIR + 'output/'
mgr = SlideManager(cam16_dir=CAM16_DIR)
n_slides= len(mgr.slides)
level = 0
tile_size = 312
### Execute this cell
poi = 0.20 ### 20% of negative tiles must contain tissue (in contrast to slide background)
poi_tumor = 0.60 ### 60% of pos tiles must contain metastases
### to not have too few positive tile, we use half overlapping tilesize
overlap_tumor = tile_size // 2
### we have enough normal tissue, so negative tiles will be less of a problem
overlap = 0.0
max_tiles_per_slide = 1000
tiles_pos = 0
num_slides = 2 #len(mgr.annotated_slides)
for i in range(num_slides):
print("Working on {}".format(mgr.annotated_slides[i].name))
# try:
filename = '{}/{}_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(GENERATED_DATA, mgr.annotated_slides[i].name, tile_size, tile_size,
poi, poi_tumor, level)
# 'w-' creates file, fails if exists
h5 = h5py.File(filename, "w-", libver='latest')
# create a new and unconsumed tile iterator
tile_iter = split_positive_slide(mgr.annotated_slides[i], level=level,
tile_size=tile_size, overlap=overlap_tumor,
poi_threshold=poi_tumor)
tiles_batch = list()
for tile, bounds in tile_iter:
if len(tiles_batch) % 10 == 0: print('positive slide #:', i, 'tiles so far:', len(tiles_batch))
if len(tiles_batch) > max_tiles_per_slide: break
tiles_batch.append(tile)
# creating a date set in the file
dset = h5.create_dataset(mgr.annotated_slides[i].name,
(len(tiles_batch), tile_size, tile_size, 3),
dtype=np.uint8,
data=np.array(tiles_batch),
compression=0)
h5.close()
tiles_pos += len(tiles_batch)
print(datetime.now(), i, '/', num_slides, ' tiles ', len(tiles_batch))
print('pos tiles total: ', tiles_pos)
# except:
# print('slide nr {}/{} failed'.format(i, len(mgr.annotated_slides)))
# print(sys.exc_info()[0])
tiles_neg = 0
num_slides = 2 #len(mgr.annotated_slides)
for i in range(num_slides):
try:
filename = '{}/{}_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(GENERATED_DATA, mgr.negative_slides[i].name, tile_size, tile_size,
poi, poi_tumor, level)
# 'w-' creates file, fails if exists
h5 = h5py.File(filename, "w-", libver='latest')
# load the slide into numpy array
arr = np.asarray(mgr.negative_slides[i].get_full_slide(level=4))
# convert it to gray scale
arr_gray = rgb2gray(arr)
# calculate otsu threshold
threshold = threshold_otsu(arr_gray)
# create a new and unconsumed tile iterator
# because we have so many negative slides we do not use overlap
tile_iter = split_negative_slide(mgr.negative_slides[i], level=level,
otsu_threshold=threshold,
tile_size=tile_size, overlap=overlap,
poi_threshold=poi)
tiles_batch = []
for tile, bounds in tile_iter:
if len(tiles_batch) % 10 == 0: print('neg slide:', i, 'tiles so far:', len(tiles_batch))
if len(tiles_batch) > max_tiles_per_slide: break
tiles_batch.append(tile)
# creating a date set in the file
dset = h5.create_dataset(mgr.negative_slides[i].name,
(len(tiles_batch), tile_size, tile_size, 3),
dtype=np.uint8,
data=np.array(tiles_batch),
compression=0)
h5.close()
tiles_neg += len(tiles_batch)
print(datetime.now(), i, '/', num_slides, ' tiles ', len(tiles_batch))
print('neg tiles total: ', tiles_neg)
except:
print('slide nr {}/{} failed'.format(i, len(mgr.negative_slides)))
print(sys.exc_info()[0])
single_file = '{}/all_wsis_{}x{}_poi{}_poiTumor{}_level{}.hdf5'.format(GENERATED_DATA, tile_size, tile_size,
poi, poi_tumor, level)
h5_single = h5py.File(single_file, 'w')
for f in os.listdir(GENERATED_DATA):
if f.startswith('normal_') or f.startswith('tumor_'):
filename = GENERATED_DATA + f
with h5py.File(filename, 'r') as h5:
for key in h5.keys():
print('processing: "{}", shape: {}'.format(key, h5[key].shape))
if h5[key].shape[0] > 0: ### dont create dsets for WSIs with 0 tiles
dset = h5_single.create_dataset(key,
h5[key].shape,
dtype=np.uint8,
data=h5[key][:],
compression=0)
h5_single.close()
print('Done combining hdf5 files')
| 0.192577 | 0.901964 |
```
## CE 295 - Energy Systems and Control
# HW 3 : Optimal Economic Dispatch in Distribution Feeders with Renewables
# Oski Bear, SID 18681868
# Prof. Moura
# Last updated: February 25, 2018
# BEAR_OSKI_HW3.ipynb
import numpy as np
import matplotlib.pyplot as plt
from cvxpy import *
%matplotlib inline
import pandas as pd
## 13 Node IEEE Test Feeder Parameters
### Node (aka Bus) Data
# l_j^P: Active power consumption [MW]
l_P = np.array([ ])
# l_j^Q: Reactive power consumption [MVAr]
l_Q = np.array([ ])
# l_j^S: Apparent power consumption [MVA]
l_S = np.sqrt(l_P**2 + l_Q**2)
# s_j,max: Maximal generating power [MW]
s_max =
# c_j: Marginal generation cost [USD/MW]
c =
# V_min, V_max: Minimum and maximum nodal voltages [V]
v_min = 0.95
v_max = 1.05
### Edge (aka Line) Data
# r_ij: Resistance [p.u.]
r = np.array([[0, 0.007547918, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.0041, 0, 0.007239685, 0, 0.007547918, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.004343811, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0.003773959, 0, 0, 0.004322245, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00434686, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.004343157, 0.01169764],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# x_ij: Reactance [p.u.]
x = np.array([[0, 0.022173236, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.0064, 0, 0.007336076, 0, 0.022173236, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.004401645, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0.011086618, 0, 0, 0.004433667, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.002430473, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.004402952, 0.004490848],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# I_max_ij: Maximal line current [p.u.]
I_max = np.array([[0, 3.0441, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1.4178, 0, 0.9591, 0, 3.0441, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3.1275, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.9591, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 3.0441, 3.1275, 0, 0.9591, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1.37193, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.9591, 1.2927],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# A_ij: Adjacency matrix; A_ij = 1 if i is parent of j
A = # you will fill this out!
### Set Data
# List of node indices
j_idx = np.arange(13)
# \rho(j): Parent node of node j
rho = # you will fill this out!
## Problem 1
# Plot active and reactive power consumption
plt.figure(num=1, figsize=(15, 4), dpi=80, facecolor='w', edgecolor='k')
# create plot
plt.bar( )
plt.show()
## Problem 2
# Assumptions:
# - Disregard the entire network diagram
# - Balance supply and demand, without any network considerations
# - Goal is to minimize generation costs, given by c^T s
# Solve with CVXPY
# Define optimization vars
p = Variable(13)
q =
s =
# Define objective function
objective = Minimize( )
# Define constraints
# Apparent Power Limits
constraints = [ ]
# Balance power generation with power consumption
constraints += [ ]
# Loop over each node
for jj in j_idx:
# Non-negative power generation
# Compute apparent power from active & reactive power
# Define problem and solve
prob2 = Problem(objective, constraints)
prob2.solve()
# Output Results
print "------------------- PROBLEM 2 --------------------"
print "--------------------------------------------------"
print prob2.status
print "Minimum Generating Cost : %4.2f"%(prob2.value),"USD"
print " "
print "Node 0 [Grid] Gen Power : p_0 = %1.3f"%(p[0].value), "MW | q_0 = %1.3f"%(q[0].value), "MW | s_0 = %1.3f"%(s[0].value),"MW"
print "Node 3 [Gas] Gen Power : p_3 = %1.3f"%(p[3].value), "MW | q_3 = %1.3f"%(q[3].value), "MW | s_3 = %1.3f"%(s[3].value),"MW"
print "Node 9 [Solar] Gen Power : p_9 = %1.3f"%(p[9].value), "MW | q_9 = %1.3f"%(q[9].value), "MW | s_9 = %1.3f"%(s[9].value),"MW"
print " "
print "Total active power : %1.3f"%(sum(l_P)),"MW consumed | %1.3f"%(sum(p.value)),"MW generated"
print "Total reactive power : %1.3f"%(sum(l_Q)),"MVAr consumed | %1.3f"%(sum(q.value)),"MVAr generated"
print "Total apparent power : %1.3f"%(sum(l_S)),"MVA consumed | %1.3f"%(sum(s.value)),"MVA generated"
## Problem 3
# Assumptions:
# - Disregard L_ij, the squared magnitude of complex line current
# - Disregard nodal voltage equation
# - Disregard nodal voltage limits
# - Disregard maximum line current
# - Goal is to minimize generation costs, given by c^T s
# Solve with CVXPY
# Define optimization vars
# Define objective function
objective =
# Define constraints
# Apparent Power Limits
# Boundary condition for power line flows
constraints += [P[0,0] == 0,
Q[0,0] == 0]
# Loop over each node
for jj in j_idx:
# Parent node, i = \rho(j)
# Line Power Flows
# Compute apparent power from active & reactive power
# Define problem and solve
prob3 = Problem(objective, constraints)
prob3.solve()
# Output Results
print "------------------- PROBLEM 3 --------------------"
print "--------------------------------------------------"
print prob3.status
print "Minimum Generating Cost : %4.2f"%(prob3.value),"USD"
print " "
print "Node 0 [Grid] Gen Power : p_0 = %1.3f"%(p[0].value), "MW | q_0 = %1.3f"%(q[0].value), "MW | s_0 = %1.3f"%(s[0].value),"MW || mu_s0 = %3.0f"%(constraints[0].dual_value[0]), "USD/MW"
print "Node 3 [Gas] Gen Power : p_3 = %1.3f"%(p[3].value), "MW | q_3 = %1.3f"%(q[3].value), "MW | s_3 = %1.3f"%(s[3].value),"MW || mu_s4 = %3.0f"%(constraints[0].dual_value[3]), "USD/MW"
print "Node 9 [Solar] Gen Power : p_9 = %1.3f"%(p[9].value), "MW | q_9 = %1.3f"%(q[9].value), "MW | s_9 = %1.3f"%(s[9].value),"MW || mu_s9 = %3.0f"%(constraints[0].dual_value[9]), "USD/MW"
print " "
print "Total active power : %1.3f"%(sum(l_P)),"MW consumed | %1.3f"%(sum(p.value)),"MW generated"
print "Total reactive power : %1.3f"%(sum(l_Q)),"MVAr consumed | %1.3f"%(sum(q.value)),"MVAr generated"
print "Total apparent power : %1.3f"%(sum(l_S)),"MVA consumed | %1.3f"%(sum(s.value)),"MVA generated"
## Problem 4
# Assumptions:
# - Add back all previously disregarded terms and constraints
# - Relax squared line current equation into inequality
# - Goal is to minimize generation costs, given by c^T s
# Solve with CVXPY
# Define optimization vars
# Define objective function
# Define constraints
# Apparent Power Limits
# Nodal voltage limits
# Squared line current limits
# Boundary condition for power line flows
constraints += [P[0,0] == 0,
Q[0,0] == 0]
# Boundary condition for squared line current
constraints += [L[0,0] == 0]
# Fix node 0 voltage to be 1 "per unit" (p.u.)
constraints += [V[0] == 1]
# Loop over each node
for jj in j_idx:
# Parent node, i = \rho(j)
# Line Power Flows
# Nodal voltage
# Squared current magnitude on lines
# Compute apparent power from active & reactive power
# Define problem and solve
prob4 = Problem(objective, constraints)
prob4.solve()
# Output Results
print "------------------- PROBLEM 4 --------------------"
print "--------------------------------------------------"
print prob4.status
print "Minimum Generating Cost : %4.2f"%(prob4.value),"USD"
print " "
print "Node 0 [Grid] Gen Power : p_0 = %1.3f"%(p[0].value), "MW | q_0 = %1.3f"%(q[0].value), "MW | s_0 = %1.3f"%(s[0].value),"MW || mu_s0 = %3.0f"%(constraints[0].dual_value[0]), "USD/MW"
print "Node 3 [Gas] Gen Power : p_3 = %1.3f"%(p[3].value), "MW | q_3 = %1.3f"%(q[3].value), "MW | s_3 = %1.3f"%(s[3].value),"MW || mu_s4 = %3.0f"%(constraints[0].dual_value[3]), "USD/MW"
print "Node 9 [Solar] Gen Power : p_9 = %1.3f"%(p[9].value), "MW | q_9 = %1.3f"%(q[9].value), "MW | s_9 = %1.3f"%(s[9].value),"MW || mu_s9 = %3.0f"%(constraints[0].dual_value[9]), "USD/MW"
print " "
print "Total active power : %1.3f"%(sum(l_P)),"MW consumed | %1.3f"%(sum(p.value)),"MW generated"
print "Total reactive power : %1.3f"%(sum(l_Q)),"MVAr consumed | %1.3f"%(sum(q.value)),"MVAr generated"
print "Total apparent power : %1.3f"%(sum(l_S)),"MVA consumed | %1.3f"%(sum(s.value)),"MVA generated"
print " "
for jj in j_idx:
print "Node %2.0f"%(jj), "Voltage : %1.3f"%((V[jj].value)**0.5), "p.u."
## Problem 5
# Assumptions:
# - Assume solar generator at node 9 has uncertain power capacity
# - Goal is to minimize generation costs, given by c^T s, in face of uncertainty
```
|
github_jupyter
|
## CE 295 - Energy Systems and Control
# HW 3 : Optimal Economic Dispatch in Distribution Feeders with Renewables
# Oski Bear, SID 18681868
# Prof. Moura
# Last updated: February 25, 2018
# BEAR_OSKI_HW3.ipynb
import numpy as np
import matplotlib.pyplot as plt
from cvxpy import *
%matplotlib inline
import pandas as pd
## 13 Node IEEE Test Feeder Parameters
### Node (aka Bus) Data
# l_j^P: Active power consumption [MW]
l_P = np.array([ ])
# l_j^Q: Reactive power consumption [MVAr]
l_Q = np.array([ ])
# l_j^S: Apparent power consumption [MVA]
l_S = np.sqrt(l_P**2 + l_Q**2)
# s_j,max: Maximal generating power [MW]
s_max =
# c_j: Marginal generation cost [USD/MW]
c =
# V_min, V_max: Minimum and maximum nodal voltages [V]
v_min = 0.95
v_max = 1.05
### Edge (aka Line) Data
# r_ij: Resistance [p.u.]
r = np.array([[0, 0.007547918, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.0041, 0, 0.007239685, 0, 0.007547918, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.004343811, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0.003773959, 0, 0, 0.004322245, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00434686, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.004343157, 0.01169764],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# x_ij: Reactance [p.u.]
x = np.array([[0, 0.022173236, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.0064, 0, 0.007336076, 0, 0.022173236, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.004401645, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0.011086618, 0, 0, 0.004433667, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0.002430473, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.004402952, 0.004490848],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# I_max_ij: Maximal line current [p.u.]
I_max = np.array([[0, 3.0441, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1.4178, 0, 0.9591, 0, 3.0441, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3.1275, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.9591, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 3.0441, 3.1275, 0, 0.9591, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1.37193, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.9591, 1.2927],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# A_ij: Adjacency matrix; A_ij = 1 if i is parent of j
A = # you will fill this out!
### Set Data
# List of node indices
j_idx = np.arange(13)
# \rho(j): Parent node of node j
rho = # you will fill this out!
## Problem 1
# Plot active and reactive power consumption
plt.figure(num=1, figsize=(15, 4), dpi=80, facecolor='w', edgecolor='k')
# create plot
plt.bar( )
plt.show()
## Problem 2
# Assumptions:
# - Disregard the entire network diagram
# - Balance supply and demand, without any network considerations
# - Goal is to minimize generation costs, given by c^T s
# Solve with CVXPY
# Define optimization vars
p = Variable(13)
q =
s =
# Define objective function
objective = Minimize( )
# Define constraints
# Apparent Power Limits
constraints = [ ]
# Balance power generation with power consumption
constraints += [ ]
# Loop over each node
for jj in j_idx:
# Non-negative power generation
# Compute apparent power from active & reactive power
# Define problem and solve
prob2 = Problem(objective, constraints)
prob2.solve()
# Output Results
print "------------------- PROBLEM 2 --------------------"
print "--------------------------------------------------"
print prob2.status
print "Minimum Generating Cost : %4.2f"%(prob2.value),"USD"
print " "
print "Node 0 [Grid] Gen Power : p_0 = %1.3f"%(p[0].value), "MW | q_0 = %1.3f"%(q[0].value), "MW | s_0 = %1.3f"%(s[0].value),"MW"
print "Node 3 [Gas] Gen Power : p_3 = %1.3f"%(p[3].value), "MW | q_3 = %1.3f"%(q[3].value), "MW | s_3 = %1.3f"%(s[3].value),"MW"
print "Node 9 [Solar] Gen Power : p_9 = %1.3f"%(p[9].value), "MW | q_9 = %1.3f"%(q[9].value), "MW | s_9 = %1.3f"%(s[9].value),"MW"
print " "
print "Total active power : %1.3f"%(sum(l_P)),"MW consumed | %1.3f"%(sum(p.value)),"MW generated"
print "Total reactive power : %1.3f"%(sum(l_Q)),"MVAr consumed | %1.3f"%(sum(q.value)),"MVAr generated"
print "Total apparent power : %1.3f"%(sum(l_S)),"MVA consumed | %1.3f"%(sum(s.value)),"MVA generated"
## Problem 3
# Assumptions:
# - Disregard L_ij, the squared magnitude of complex line current
# - Disregard nodal voltage equation
# - Disregard nodal voltage limits
# - Disregard maximum line current
# - Goal is to minimize generation costs, given by c^T s
# Solve with CVXPY
# Define optimization vars
# Define objective function
objective =
# Define constraints
# Apparent Power Limits
# Boundary condition for power line flows
constraints += [P[0,0] == 0,
Q[0,0] == 0]
# Loop over each node
for jj in j_idx:
# Parent node, i = \rho(j)
# Line Power Flows
# Compute apparent power from active & reactive power
# Define problem and solve
prob3 = Problem(objective, constraints)
prob3.solve()
# Output Results
print "------------------- PROBLEM 3 --------------------"
print "--------------------------------------------------"
print prob3.status
print "Minimum Generating Cost : %4.2f"%(prob3.value),"USD"
print " "
print "Node 0 [Grid] Gen Power : p_0 = %1.3f"%(p[0].value), "MW | q_0 = %1.3f"%(q[0].value), "MW | s_0 = %1.3f"%(s[0].value),"MW || mu_s0 = %3.0f"%(constraints[0].dual_value[0]), "USD/MW"
print "Node 3 [Gas] Gen Power : p_3 = %1.3f"%(p[3].value), "MW | q_3 = %1.3f"%(q[3].value), "MW | s_3 = %1.3f"%(s[3].value),"MW || mu_s4 = %3.0f"%(constraints[0].dual_value[3]), "USD/MW"
print "Node 9 [Solar] Gen Power : p_9 = %1.3f"%(p[9].value), "MW | q_9 = %1.3f"%(q[9].value), "MW | s_9 = %1.3f"%(s[9].value),"MW || mu_s9 = %3.0f"%(constraints[0].dual_value[9]), "USD/MW"
print " "
print "Total active power : %1.3f"%(sum(l_P)),"MW consumed | %1.3f"%(sum(p.value)),"MW generated"
print "Total reactive power : %1.3f"%(sum(l_Q)),"MVAr consumed | %1.3f"%(sum(q.value)),"MVAr generated"
print "Total apparent power : %1.3f"%(sum(l_S)),"MVA consumed | %1.3f"%(sum(s.value)),"MVA generated"
## Problem 4
# Assumptions:
# - Add back all previously disregarded terms and constraints
# - Relax squared line current equation into inequality
# - Goal is to minimize generation costs, given by c^T s
# Solve with CVXPY
# Define optimization vars
# Define objective function
# Define constraints
# Apparent Power Limits
# Nodal voltage limits
# Squared line current limits
# Boundary condition for power line flows
constraints += [P[0,0] == 0,
Q[0,0] == 0]
# Boundary condition for squared line current
constraints += [L[0,0] == 0]
# Fix node 0 voltage to be 1 "per unit" (p.u.)
constraints += [V[0] == 1]
# Loop over each node
for jj in j_idx:
# Parent node, i = \rho(j)
# Line Power Flows
# Nodal voltage
# Squared current magnitude on lines
# Compute apparent power from active & reactive power
# Define problem and solve
prob4 = Problem(objective, constraints)
prob4.solve()
# Output Results
print "------------------- PROBLEM 4 --------------------"
print "--------------------------------------------------"
print prob4.status
print "Minimum Generating Cost : %4.2f"%(prob4.value),"USD"
print " "
print "Node 0 [Grid] Gen Power : p_0 = %1.3f"%(p[0].value), "MW | q_0 = %1.3f"%(q[0].value), "MW | s_0 = %1.3f"%(s[0].value),"MW || mu_s0 = %3.0f"%(constraints[0].dual_value[0]), "USD/MW"
print "Node 3 [Gas] Gen Power : p_3 = %1.3f"%(p[3].value), "MW | q_3 = %1.3f"%(q[3].value), "MW | s_3 = %1.3f"%(s[3].value),"MW || mu_s4 = %3.0f"%(constraints[0].dual_value[3]), "USD/MW"
print "Node 9 [Solar] Gen Power : p_9 = %1.3f"%(p[9].value), "MW | q_9 = %1.3f"%(q[9].value), "MW | s_9 = %1.3f"%(s[9].value),"MW || mu_s9 = %3.0f"%(constraints[0].dual_value[9]), "USD/MW"
print " "
print "Total active power : %1.3f"%(sum(l_P)),"MW consumed | %1.3f"%(sum(p.value)),"MW generated"
print "Total reactive power : %1.3f"%(sum(l_Q)),"MVAr consumed | %1.3f"%(sum(q.value)),"MVAr generated"
print "Total apparent power : %1.3f"%(sum(l_S)),"MVA consumed | %1.3f"%(sum(s.value)),"MVA generated"
print " "
for jj in j_idx:
print "Node %2.0f"%(jj), "Voltage : %1.3f"%((V[jj].value)**0.5), "p.u."
## Problem 5
# Assumptions:
# - Assume solar generator at node 9 has uncertain power capacity
# - Goal is to minimize generation costs, given by c^T s, in face of uncertainty
| 0.366476 | 0.701279 |
```
import pyspark
import re
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("RDD assignment").getOrCreate()
```
### Question-1 Create RDDs in three different ways
1. Using parallelize method
```
rdd_par = spark.sparkContext.parallelize(["Hello world", "Hope you are bot fed with ABD class", "ello"])
print(rdd_par.count())
```
2. Using transformation
```
rdd_trans = rdd_par.filter(lambda word: word.startswith('H'))
print(rdd_trans.collect())
```
3. Using data source
```
rdd_ds = spark.sparkContext.textFile('input.txt')
print(rdd_ds.collect())
```
### Read a text file and count the number of words in the file using RDD operations.
```
rdd = spark.sparkContext.textFile('input.txt')
word_rdd = rdd.flatMap(lambda line : line.split(' '))
filtered_word = word_rdd.filter(lambda word : len(word) > 0)
print(f'Number of words in the file is {filtered_word.count()}')
```
### Write a program to find the word frequency in a given file.
```
rdd = spark.sparkContext.textFile('input.txt')
filtered_lines = rdd.filter(lambda line : line.strip()) # to remove lines that are empty
word_rdd = filtered_lines.flatMap(lambda line : line.split(' '))
freq_words = word_rdd.map(lambda word : (word, 1))
sorted(freq_words.reduceByKey(lambda a,b : a + b).collect(),
key=lambda x:x[1], reverse=True)
```
### Write a program to convert all words in a file to uppercase.
```
rdd = spark.sparkContext.textFile('input.txt')
converted_rdd = rdd.map(lambda line : line.upper())  # uppercasing each line uppercases every word in it
with open('UppercaseFile.txt', 'w') as f:
for line in converted_rdd.collect():
f.write(f'{line}\n')
```
### Write a program to convert all words in a file to lowercase.
```
rdd = spark.sparkContext.textFile('input.txt')
converted_rdd = rdd.map(lambda line : line.lower())  # lowercasing each line lowercases every word in it
with open('LowercaseFile.txt', 'w') as f:
for line in converted_rdd.collect():
f.write(f'{line}\n')
```
### Write a program to capitalize first letter of each words in file (use string capitalize() method).
```
rdd = spark.sparkContext.textFile('input.txt')
converted_rdd = rdd.flatMap(lambda line : line.split(' '))
converted_rdd = converted_rdd.map(lambda word : word.capitalize())
with open('HeadingcaseFile.txt', 'w') as f:
for line in converted_rdd.collect():
f.write(f'{line}\n')
```
### Find the longest word from the given set of words.
```
rdd = spark.sparkContext.textFile('input.txt')
word_rdd = rdd.flatMap(lambda line : line.split(' '))
max_word = word_rdd.max(key=len)  # longest word, not the lexicographic maximum
print(f'Max length word is {max_word} with length {len(max_word)}')
```
### Map the Registration numbers to corresponding branch.
* 1000 series ML
* 2000 series VLSI
* 3000 series ES
* 4000 series MSc
* 5000 series CC
* 6000 series BDA
* 9000 series HDA
Given registration number, generate a key-value pair of Registration Number and Corresponding Branch.
```
import numpy as np
# before running please delete the regProg folder
program = {
1: 'ML', 2: 'VLSI', 3: 'ES', 4: 'MSc', 5: 'CC', 6: 'BDA', 9: 'HDA'
}
rdd = spark.sparkContext.textFile('reg.txt')
reg_rdd = rdd.map(lambda x : (x, program[int(x) // 1000]) if int(x) // 1000 in program else (x, np.nan))
reg_rdd.collect()
reg_rdd.saveAsTextFile('regProg')
```
### A text file contains numbers separated by single spaces. The numbers are stored in no particular order, and one line may contain one or more numbers.
Find the maximum, minimum, sum and mean of numbers.
```
rdd = spark.sparkContext.textFile('numbers.txt')
text_num_rdd = rdd.flatMap(lambda x : x.split(' '))
num_rdd = text_num_rdd.map(lambda x : float(x))
max_num = num_rdd.max()
min_num = num_rdd.min()
sum_num = num_rdd.sum()
mean_num = num_rdd.mean()
print(f'Max number = {max_num}')
print(f'Min number = {min_num}')
print(f'Sum of numbers = {sum_num}')
print(f'Mean of numbers = {mean_num}')
```
### A text file (citizen.txt) contains data about the citizens of a country. The fields in the file are Name, dob, Phone, email and state name. Another file maps state names to state codes, e.g. Karnataka is coded as KA, TamilNadu as TN, Kerala as KL. Compress the citizen.txt file by replacing each full state name with its state code
```
# Remove compressed_citizen_df before running
state_mapping = spark.read.csv("state_mapping.txt", inferSchema=True, header=True)
state_mapping = map(lambda row : row.asDict(), state_mapping.collect())
state_mapping = {state['state']: state['statecode'] for state in state_mapping}
citizen_df = spark.read.csv("citizen.txt", inferSchema=True, header=True)
updated_citizen_df = citizen_df.na.replace(state_mapping, 1)
updated_citizen_df.write.csv('compressed_citizen_df', header=True)
```
|
github_jupyter
|
import pyspark
import re
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("RDD assignment").getOrCreate()
rdd_par = spark.sparkContext.parallelize(["Hello world", "Hope you are bot fed with ABD class", "ello"])
print(rdd_par.count())
rdd_trans = rdd_par.filter(lambda word: word.startswith('H'))
print(rdd_trans.collect())
rdd_ds = spark.sparkContext.textFile('input.txt')
print(rdd_ds.collect())
rdd = spark.sparkContext.textFile('input.txt')
word_rdd = rdd.flatMap(lambda line : line.split(' '))
filtered_word = word_rdd.filter(lambda word : len(word) > 0)
print(f'Number of words in the file is {filtered_word.count()}')
rdd = spark.sparkContext.textFile('input.txt')
filtered_lines = rdd.filter(lambda line : line.strip()) # to remove lines that are empty
word_rdd = filtered_lines.flatMap(lambda line : line.split(' '))
freq_words = word_rdd.map(lambda word : (word, 1))
sorted(freq_words.reduceByKey(lambda a,b : a + b).collect(),
key=lambda x:x[1], reverse=True)
rdd = spark.sparkContext.textFile('input.txt')
converted_rdd = rdd.map(lambda line : line.upper())  # uppercasing each line uppercases every word in it
with open('UppercaseFile.txt', 'w') as f:
for line in converted_rdd.collect():
f.write(f'{line}\n')
rdd = spark.sparkContext.textFile('input.txt')
converted_rdd = rdd.map(lambda line : line.lower())  # lowercasing each line lowercases every word in it
with open('LowercaseFile.txt', 'w') as f:
for line in converted_rdd.collect():
f.write(f'{line}\n')
rdd = spark.sparkContext.textFile('input.txt')
converted_rdd = rdd.flatMap(lambda line : line.split(' '))
converted_rdd = converted_rdd.map(lambda word : word.capitalize())
with open('HeadingcaseFile.txt', 'w') as f:
for line in converted_rdd.collect():
f.write(f'{line}\n')
rdd = spark.sparkContext.textFile('input.txt')
word_rdd = rdd.flatMap(lambda line : line.split(' '))
max_word = word_rdd.max(key=len)  # longest word, not the lexicographic maximum
print(f'Max length word is {max_word} with length {len(max_word)}')
import numpy as np
# before running please delete the regProg folder
program = {
1: 'ML', 2: 'VLSI', 3: 'ES', 4: 'MSc', 5: 'CC', 6: 'BDA', 9: 'HDA'
}
rdd = spark.sparkContext.textFile('reg.txt')
reg_rdd = rdd.map(lambda x : (x, program[int(x) // 1000]) if int(x) // 1000 in program else (x, np.nan))
reg_rdd.collect()
reg_rdd.saveAsTextFile('regProg')
rdd = spark.sparkContext.textFile('numbers.txt')
text_num_rdd = rdd.flatMap(lambda x : x.split(' '))
num_rdd = text_num_rdd.map(lambda x : float(x))
max_num = num_rdd.max()
min_num = num_rdd.min()
sum_num = num_rdd.sum()
mean_num = num_rdd.mean()
print(f'Max number = {max_num}')
print(f'Min number = {min_num}')
print(f'Sum of numbers = {sum_num}')
print(f'Mean of numbers = {mean_num}')
# Remove compressed_citizen_df before running
state_mapping = spark.read.csv("state_mapping.txt", inferSchema=True, header=True)
state_mapping = map(lambda row : row.asDict(), state_mapping.collect())
state_mapping = {state['state']: state['statecode'] for state in state_mapping}
citizen_df = spark.read.csv("citizen.txt", inferSchema=True, header=True)
updated_citizen_df = citizen_df.na.replace(state_mapping, 1)
updated_citizen_df.write.csv('compressed_citizen_df', header=True)
| 0.360602 | 0.89534 |
<a href="https://colab.research.google.com/github/vishant016/140_VISHANT/blob/main/LAB5/2_linear_regression_pytorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Import Numpy & PyTorch
import numpy as np
import torch
```
## Linear Regression Model using PyTorch built-ins
Let's re-implement the same model using some built-in functions and classes from PyTorch.
And now using two different targets: Apples and Oranges
```
# Imports
import torch.nn as nn
from google.colab import drive
drive.mount('/content/drive')
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
```
### Dataset and DataLoader
We'll create a `TensorDataset`, which allows access to rows from `inputs` and `targets` as tuples. We'll also create a DataLoader, to split the data into batches while training. It also provides other utilities like shuffling and sampling.
```
# Import tensor dataset & data loader
from torch.utils.data import TensorDataset, DataLoader
# Define dataset
dataset = torch.utils.data.TensorDataset(inputs,targets)
# Define data loader
dataloader = torch.utils.data.DataLoader(dataset,batch_size=5)
```
### nn.Linear
Instead of initializing the weights & biases manually, we can define the model using `nn.Linear`.
```
# Define model
model = nn.Linear(inputs.shape[1],targets.shape[1])
model.parameters()
```
### Optimizer
Instead of manually manipulating the weights & biases using gradients, we can use the optimizer `optim.SGD`.
```
# Define optimizer
optimizer = torch.optim.SGD(model.parameters(),lr=1e-4)
```
### Loss Function
Instead of defining a loss function manually, we can use the built-in loss function `mse_loss`.
```
# Import nn.functional
import torch.nn.functional as F
# Define loss function
loss_fn = F.mse_loss
#loss = loss_fn(? , ?)
#print(loss)
```
### Train the model
We are ready to train the model now. We can define a utility function `fit` which trains the model for a given number of epochs.
```
# Define a utility function to train the model
def fit(num_epochs, model, loss_fn, opt):
for epoch in range(num_epochs):
for xb,yb in dataloader:
# Generate predictions
pred = model(xb)
loss = loss_fn(yb,pred)
# Perform gradient descent
loss.backward()
opt.step()
opt.zero_grad()
print('Training loss: ', loss_fn(model(inputs), targets))
# Train the model for 120 epochs
fit(120 , model , loss_fn, optimizer)
# Generate predictions
#preds = model(?)
#preds
preds = model(inputs)
preds
# Compare with targets
targets
```
Now we can define the model, optimizer and loss function exactly as before.
#Exercise 1:
Try Linear Regression using only NumPy (without TensorFlow/PyTorch or other torch libraries). You can optionally use sklearn if you want (a NumPy-only sketch follows below).
#Exercise 2:
Try Linear Regression on the same data using TensorFlow
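For Exercise 1, a NumPy-only solution can be sketched as below; it assumes the original NumPy `inputs` (15×3) and `targets` (15×2) arrays from earlier in the notebook, before they were converted to tensors.
```
import numpy as np

def fit_linear_numpy(X, Y):
    """Least-squares weights (including a bias row) for Y ≈ [X, 1] @ W."""
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])   # add a bias column
    W, *_ = np.linalg.lstsq(Xb, Y, rcond=None)      # shape (4, 2)
    return W

def predict_linear_numpy(X, W):
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])
    return Xb @ W

# usage (hypothetical, with the NumPy versions of inputs/targets):
# W = fit_linear_numpy(inputs, targets); preds = predict_linear_numpy(inputs, W)
```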
```
from sklearn.linear_model import LinearRegression
model = LinearRegression()
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(inputs,targets,test_size=0.5,random_state=140)
model.fit(X_train,Y_train)
predictions = model.predict(X_test)
predictions
Y_test
from sklearn.metrics import mean_squared_error
print(mean_squared_error(Y_test,predictions))
import tensorflow as tf
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float64')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float64')
model = tf.keras.Sequential()
model.compile(loss="mean_squared_error")
inputs = tf.Variable(inputs)
targets = tf.Variable(targets)
print("targets :\n",targets)
v = np.random.rand(3,2)
r = np.random.randn(2)
v = tf.Variable(v)
r = tf.Variable(r)
print(v)
print()
print(r)
def model(s):
return s @ v + r
prediction = model(inputs)
prediction
def mse(t1,t2):
return tf.reduce_mean(tf.square(t1 - t2))
print(mse(prediction,targets))
epochs = 25
for epoch_count in range(epochs):
with tf.GradientTape(persistent=True) as t:
current_loss = mse(targets, model(inputs))
v1 = t.gradient(current_loss,v)
r1 = t.gradient(current_loss,r)
v.assign_sub(1e-4 * v1)
r.assign_sub(1e-4 * r1)
print(f"Epoch: {epoch_count} \n Loss: {current_loss.numpy()} \n\n")
model(inputs)
```
|
github_jupyter
|
# Import Numpy & PyTorch
import numpy as np
import torch
# Imports
import torch.nn as nn
from google.colab import drive
drive.mount('/content/drive')
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
# Import tensor dataset & data loader
from torch.utils.data import TensorDataset, DataLoader
# Define dataset
dataset = torch.utils.data.TensorDataset(inputs,targets)
# Define data loader
dataloader = torch.utils.data.DataLoader(dataset,batch_size=5)
# Define model
model = nn.Linear(inputs.shape[1],targets.shape[1])
model.parameters()
# Define optimizer
optimizer = torch.optim.SGD(model.parameters(),lr=1e-4)
# Import nn.functional
import torch.nn.functional as F
# Define loss function
loss_fn = F.mse_loss
#loss = loss_fn(? , ?)
#print(loss)
# Define a utility function to train the model
def fit(num_epochs, model, loss_fn, opt):
for epoch in range(num_epochs):
for xb,yb in dataloader:
# Generate predictions
pred = model(xb)
loss = loss_fn(yb,pred)
# Perform gradient descent
loss.backward()
opt.step()
opt.zero_grad()
print('Training loss: ', loss_fn(model(inputs), targets))
# Train the model for 120 epochs
fit(120 , model , loss_fn, optimizer)
# Generate predictions
#preds = model(?)
#preds
preds = model(inputs)
preds
# Compare with targets
targets
from sklearn.linear_model import LinearRegression
model = LinearRegression()
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(inputs,targets,test_size=0.5,random_state=140)
model.fit(X_train,Y_train)
predictions = model.predict(X_test)
predictions
Y_test
from sklearn.metrics import mean_squared_error
print(mean_squared_error(Y_test,predictions))
import tensorflow as tf
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70], [73, 67, 43], [91, 88, 64], [87, 134, 58], [102, 43, 37], [69, 96, 70]], dtype='float64')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119],
[56, 70], [81, 101], [119, 133], [22, 37], [103, 119]], dtype='float64')
model = tf.keras.Sequential()
model.compile(loss="mean_squared_error")
inputs = tf.Variable(inputs)
targets = tf.Variable(targets)
print("targets :\n",targets)
v = np.random.rand(3,2)
r = np.random.randn(2)
v = tf.Variable(v)
r = tf.Variable(r)
print(v)
print()
print(r)
def model(s):
return s @ v + r
prediction = model(inputs)
prediction
def mse(t1,t2):
return tf.reduce_mean(tf.square(t1 - t2))
print(mse(prediction,targets))
epochs = 25
for epoch_count in range(epochs):
with tf.GradientTape(persistent=True) as t:
current_loss = mse(targets, model(inputs))
v1 = t.gradient(current_loss,v)
r1 = t.gradient(current_loss,r)
v.assign_sub(1e-4 * v1)
r.assign_sub(1e-4 * r1)
print(f"Epoch: {epoch_count} \n Loss: {current_loss.numpy()} \n\n")
model(inputs)
| 0.691497 | 0.987723 |
* Assignment goals:
* Use encoding to handle categorical data<br>
* Fill in missing values
* Key points:
* There are several ways to encode categorical data; you need to know which method to use and when
* How missing values are filled should depend on the situation
* Problems:
* 【Basic 11】
1. Using the DataFrame given in the problem, complete the following operations:
- Compute the mean age for each distinct animal type
- Sort the data by the age column in ascending order, then by the visits column in descending order
- Replace the strings yes and no in the priority column with the boolean values True and False
2. Given a DataFrame with two columns, subtract from each number
1) the mean of its column
2) the mean of its row
3. Following on from the previous problem:
1) which row has the smallest total
2) which column has the smallest total
* 【Advanced 11-1】
1. Analyze the following score data:
- What is student 6's (student_id=6) average score over the 3 subjects?
- Has student 6's 3-subject average beaten half of the class?
- Because the class did poorly, the school adds marks uniformly: each score becomes its square root times ten. What are student 6's 3 subject scores after the adjustment?
- Following on, what is each subject's class average after the adjustment?
```
score_df = pd.DataFrame([[1,56,66,70], [2,90,45,34], [3,45,32,55], [4,70,77,89], [5,56,80,70], [6,60,54,55], [7,45,70,79], [8,34,77,76], [9,25,87,60], [10,88,40,43]],columns=['student_id','math_score','english_score','chinese_score'])
```
* 【Advanced 11-2】
1. Fill the missing values in the Profession column of the questionnaire data below with the string 'others', then go one step further and encode the strings.
2. Which encoding method is more appropriate here? Why?
```
import numpy as np
import pandas as pd
print( 'Numpy 版本: ', np.__version__ )
print( 'Pandas 版本: ', pd.__version__ )
```
* 【Basic 11】
```
#1
data = {
'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']
}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(data, index=labels)
df
print('計算每個不同種類 animal 的 age 的平均數')
for i in set(df['animal']):
print(f'{i}\'s average age is', df[df['animal']==i]['age'].mean())
print('='*20)
print('\n將資料依照 Age 欄位由小到大排序,再依照 visits 欄位由大到小排序')
print(df.sort_values(by=['age', 'visits'], ascending=[True,False]))
print('='*20)
print('\n將 priority 欄位中的 yes 和 no 字串,換成是布林值 的 True 和 False')
df['priority'].replace({'yes':True, 'no':False}, inplace = True)
print(df)
## Concise version
print('計算每個不同種類 animal 的 age 的平均數')
a = df.groupby(['animal']).mean()
print(a['age'])
print('='*20)
print('\n將資料依照 Age 欄位由小到大排序,再依照 visits 欄位由大到小排序')
print(df.sort_values(by=['age', 'visits'], ascending=[True,False]))
print('='*20)
print('\n將 priority 欄位中的 yes 和 no 字串,換成是布林值 的 True 和 False')
print(df.replace(['yes', 'no'], ['True', 'False']))
print( 'Over All Mean' )
print( df.age.mean() )
print( df['age'].mean(axis=0) )
#2
df = pd.DataFrame(np.random.random(size=(5, 3)))
df
# mean of each column
print('該欄位的平均數')
for i in df.columns[:]:
print(f'column {i}\'s average is {df[i].mean()}')
print('='*20)
print('該筆資料平均數')
for i in df.index[:]:
print(f'index {i}\'s average is {df.loc[i].mean()}')
##瘦身寫法
print('每個數字減去該欄位的平均數')
print( df-df.mean(axis=0) )
print('='*20)
print('每個數字減去該筆資料平均數')
print( df.sub(df.mean(axis=1), axis=0) ) #加法用add
print( df.apply(lambda x: x-df.mean(axis=1)) ) #apply寫法
#3
print('哪一筆的資料總合最小')
df_sum_index = { i:df.loc[i].sum() for i in df.index[:] }
print( ''.join([str(i) for i in df_sum_index.keys() if df_sum_index[i]==min(df_sum_index.values())]) )
print('='*20)
print('哪一欄位的資料總合最小')
df_sum_col = { i:df[i].sum() for i in df.columns[:] }
print( ''.join([str(i) for i in df_sum_col.keys() if df_sum_col[i]==min(df_sum_col.values())]) )
##瘦身寫法
print('哪一筆的資料總合最小')
print( df.sum(axis=1).idxmin() )
print('='*20)
print('哪一欄位的資料總合最小')
print( df.sum(axis=0).idxmin() )
```
* [Advanced 11-1]
```
score_df = pd.DataFrame([[1,56,66,70], [2,90,45,34], [3,45,32,55], [4,70,77,89], [5,56,80,70], [6,60,54,55], [7,45,70,79], [8,34,77,76], [9,25,87,60], [10,88,40,43]],
columns=['student_id','math_score','english_score','chinese_score'])
score_df = score_df.set_index('student_id')
score_df
#1. 6號學生(student_id=6)3科平均分數為何?
print(score_df.loc[6].mean())
#2. 6號學生3科平均分數是否有贏過班上一半的同學
score_df['average_score'] = [score_df.loc[i].mean() for i in score_df.index[:]]
print('Yes' if list(score_df.sort_values(by=['average_score']).index[:]).index(6) >= len(score_df)/2 else 'No')
#另法
num6 = score_df.loc[6].mean()
median = (score_df.sum(axis = 1) / 3).median()
if num6 > median:
print('Yes')
else:
print('No')
#由於班上同學成績不好,所以學校統一加分,加分方式為開根號乘以十,請問6號同學3科成績分別是?
#score_df.drop(columns='average_score', inplace=True)
score_df.iloc[:] = score_df.iloc[:].apply(lambda x : x**(0.5)*10)
print(score_df.loc[6])
#另法
score_df = score_df.apply(lambda x: x**(0.5)* 10)
score_df.loc[6]
#承上題,加分後各科班平均變多少
print(score_df.loc[:].mean())
print('='*20)
print(score_df.mean())
```
* [Advanced 11-2]
```
# 1.將以下問卷資料的職業(Profession)欄位缺失值填入字串'others',更進一步將字串做編碼
q_df = pd.DataFrame( [['male', 'teacher'],
['male', 'engineer'],
['female', None],
['female', 'engineer']], columns=['Sex','Profession'] )
q_df
#缺失值填入字串'others'
q_df.fillna( value='others', inplace=True )
q_df
#更進一步將字串做編碼。此時用什麼方式做編碼比較適合?為什麼?
# Ans: One-Hot encoding [虛擬變數(dummy variable)的方法];因職業之間並沒有順序及大小之分。
OneHot = pd.get_dummies( q_df.Profession, prefix='Profession' )
q_df = pd.concat( [q_df, OneHot], axis=1 )
q_df
```
|
github_jupyter
|
score_df = pd.DataFrame([[1,56,66,70], [2,90,45,34], [3,45,32,55], [4,70,77,89], [5,56,80,70], [6,60,54,55], [7,45,70,79], [8,34,77,76], [9,25,87,60], [10,88,40,43]],columns=['student_id','math_score','english_score','chinese_score'])
import numpy as np
import pandas as pd
print( 'Numpy 版本: ', np.__version__ )
print( 'Pandas 版本: ', pd.__version__ )
#1
data = {
'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']
}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(data, index=labels)
df
print('計算每個不同種類 animal 的 age 的平均數')
for i in set(df['animal']):
print(f'{i}\'s average age is', df[df['animal']==i]['age'].mean())
print('='*20)
print('\n將資料依照 Age 欄位由小到大排序,再依照 visits 欄位由大到小排序')
print(df.sort_values(by=['age', 'visits'], ascending=[True,False]))
print('='*20)
print('\n將 priority 欄位中的 yes 和 no 字串,換成是布林值 的 True 和 False')
df['priority'].replace({'yes':True, 'no':False}, inplace = True)
print(df)
## 瘦身寫法
print('計算每個不同種類 animal 的 age 的平均數')
a = df.groupby(['animal']).mean()
print(a['age'])
print('='*20)
print('\n將資料依照 Age 欄位由小到大排序,再依照 visits 欄位由大到小排序')
print(df.sort_values(by=['age', 'visits'], ascending=[True,False]))
print('='*20)
print('\n將 priority 欄位中的 yes 和 no 字串,換成是布林值 的 True 和 False')
print(df.replace(['yes', 'no'], ['True', 'False']))
print( 'Over All Mean' )
print( df.age.mean() )
print( df['age'].mean(axis=0) )
#2
df = pd.DataFrame(np.random.random(size=(5, 3)))
df
# 該欄位的平均數
print('該欄位的平均數')
for i in df.columns[:]:
print(f'column {i}\'s average is {df[i].mean()}')
print('='*20)
print('該筆資料平均數')
for i in df.index[:]:
print(f'index {i}\'s average is {df.loc[i].mean()}')
##瘦身寫法
print('每個數字減去該欄位的平均數')
print( df-df.mean(axis=0) )
print('='*20)
print('每個數字減去該筆資料平均數')
print( df.sub(df.mean(axis=1), axis=0) ) #加法用add
print( df.apply(lambda x: x-df.mean(axis=1)) ) #apply寫法
#3
print('哪一筆的資料總合最小')
df_sum_index = { i:df.loc[i].sum() for i in df.index[:] }
print( ''.join([str(i) for i in df_sum_index.keys() if df_sum_index[i]==min(df_sum_index.values())]) )
print('='*20)
print('哪一欄位的資料總合最小')
df_sum_col = { i:df[i].sum() for i in df.columns[:] }
print( ''.join([str(i) for i in df_sum_col.keys() if df_sum_col[i]==min(df_sum_col.values())]) )
##瘦身寫法
print('哪一筆的資料總合最小')
print( df.sum(axis=1).idxmin() )
print('='*20)
print('哪一欄位的資料總合最小')
print( df.sum(axis=0).idxmin() )
score_df = pd.DataFrame([[1,56,66,70], [2,90,45,34], [3,45,32,55], [4,70,77,89], [5,56,80,70], [6,60,54,55], [7,45,70,79], [8,34,77,76], [9,25,87,60], [10,88,40,43]],
columns=['student_id','math_score','english_score','chinese_score'])
score_df = score_df.set_index('student_id')
score_df
#1. 6號學生(student_id=6)3科平均分數為何?
print(score_df.loc[6].mean())
#2. 6號學生3科平均分數是否有贏過班上一半的同學
score_df['average_score'] = [score_df.loc[i].mean() for i in score_df.index[:]]
print('Yes' if list(score_df.sort_values(by=['average_score']).index[:]).index(6) >= len(score_df)/2 else 'No')
#另法
num6 = score_df.loc[6].mean()
median = (score_df.sum(axis = 1) / 3).median()
if num6 > median:
print('Yes')
else:
print('No')
#由於班上同學成績不好,所以學校統一加分,加分方式為開根號乘以十,請問6號同學3科成績分別是?
#score_df.drop(columns='average_score', inplace=True)
score_df.iloc[:] = score_df.iloc[:].apply(lambda x : x**(0.5)*10)
print(score_df.loc[6])
#另法
score_df = score_df.apply(lambda x: x**(0.5)* 10)
score_df.loc[6]
#承上題,加分後各科班平均變多少
print(score_df.loc[:].mean())
print('='*20)
print(score_df.mean())
# 1.將以下問卷資料的職業(Profession)欄位缺失值填入字串'others',更進一步將字串做編碼
q_df = pd.DataFrame( [['male', 'teacher'],
['male', 'engineer'],
['female', None],
['female', 'engineer']], columns=['Sex','Profession'] )
q_df
#缺失值填入字串'others'
q_df.fillna( value='others', inplace=True )
q_df
#更進一步將字串做編碼。此時用什麼方式做編碼比較適合?為什麼?
# Ans: One-Hot encoding [虛擬變數(dummy variable)的方法];因職業之間並沒有順序及大小之分。
OneHot = pd.get_dummies( q_df.Profession, prefix='Profession' )
q_df = pd.concat( [q_df, OneHot], axis=1 )
q_df
| 0.095711 | 0.790166 |
<a href="https://colab.research.google.com/github/agemagician/CodeTrans/blob/main/prediction/transfer%20learning%20fine-tuning/function%20documentation%20generation/javascript/small_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**<h3>Predict the documentation for javascript code using codeTrans transfer learning finetuning model</h3>**
<h4>You can make free predictions online through this
<a href="https://huggingface.co/SEBIS/code_trans_t5_small_code_documentation_generation_javascript_transfer_learning_finetune">link</a>.</h4> (When using the online prediction, you need to parse and tokenize the code first.)
**1. Load the necessary libraries, including Hugging Face transformers**
```
!pip install -q transformers sentencepiece
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline
```
**2. Load the summarization pipeline and move it onto the GPU if available**
```
pipeline = SummarizationPipeline(
model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_small_code_documentation_generation_javascript_transfer_learning_finetune"),
tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_small_code_documentation_generation_javascript_transfer_learning_finetune", skip_special_tokens=True),
device=0
)
```
**3. Give the code for summarization, then parse and tokenize it**
```
code = "function isStandardBrowserEnv() {\n if (typeof navigator !== 'undefined' && (navigator.product === 'ReactNative' ||\n navigator.product === 'NativeScript' ||\n navigator.product === 'NS')) {\n return false;\n }\n return (\n typeof window !== 'undefined' &&\n typeof document !== 'undefined'\n );\n}" #@param {type:"raw"}
!pip install tree_sitter
!git clone https://github.com/tree-sitter/tree-sitter-javascript
from tree_sitter import Language, Parser
Language.build_library(
'build/my-languages.so',
['tree-sitter-javascript']
)
JAVASCRIPT_LANGUAGE = Language('build/my-languages.so', 'javascript')
parser = Parser()
parser.set_language(JAVASCRIPT_LANGUAGE)
def get_string_from_code(node, lines):
line_start = node.start_point[0]
line_end = node.end_point[0]
char_start = node.start_point[1]
char_end = node.end_point[1]
if line_start != line_end:
code_list.append(' '.join([lines[line_start][char_start:]] + lines[line_start+1:line_end] + [lines[line_end][:char_end]]))
else:
code_list.append(lines[line_start][char_start:char_end])
def my_traverse(node, code_list):
lines = code.split('\n')
if node.child_count == 0:
get_string_from_code(node, lines)
elif node.type == 'string':
get_string_from_code(node, lines)
else:
for n in node.children:
my_traverse(n, code_list)
return ' '.join(code_list)
tree = parser.parse(bytes(code, "utf8"))
code_list=[]
tokenized_code = my_traverse(tree.root_node, code_list)
print("Output after tokenization: " + tokenized_code)
```
**4. Make Prediction**
```
pipeline([tokenized_code])
```
|
github_jupyter
|
!pip install -q transformers sentencepiece
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline
pipeline = SummarizationPipeline(
model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_small_code_documentation_generation_javascript_transfer_learning_finetune"),
tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_small_code_documentation_generation_javascript_transfer_learning_finetune", skip_special_tokens=True),
device=0
)
code = "function isStandardBrowserEnv() {\n if (typeof navigator !== 'undefined' && (navigator.product === 'ReactNative' ||\n navigator.product === 'NativeScript' ||\n navigator.product === 'NS')) {\n return false;\n }\n return (\n typeof window !== 'undefined' &&\n typeof document !== 'undefined'\n );\n}" #@param {type:"raw"}
!pip install tree_sitter
!git clone https://github.com/tree-sitter/tree-sitter-javascript
from tree_sitter import Language, Parser
Language.build_library(
'build/my-languages.so',
['tree-sitter-javascript']
)
JAVASCRIPT_LANGUAGE = Language('build/my-languages.so', 'javascript')
parser = Parser()
parser.set_language(JAVASCRIPT_LANGUAGE)
def get_string_from_code(node, lines):
line_start = node.start_point[0]
line_end = node.end_point[0]
char_start = node.start_point[1]
char_end = node.end_point[1]
if line_start != line_end:
code_list.append(' '.join([lines[line_start][char_start:]] + lines[line_start+1:line_end] + [lines[line_end][:char_end]]))
else:
code_list.append(lines[line_start][char_start:char_end])
def my_traverse(node, code_list):
lines = code.split('\n')
if node.child_count == 0:
get_string_from_code(node, lines)
elif node.type == 'string':
get_string_from_code(node, lines)
else:
for n in node.children:
my_traverse(n, code_list)
return ' '.join(code_list)
tree = parser.parse(bytes(code, "utf8"))
code_list=[]
tokenized_code = my_traverse(tree.root_node, code_list)
print("Output after tokenization: " + tokenized_code)
pipeline([tokenized_code])
| 0.598312 | 0.878001 |
# Querying firestore at random
### Initialize
```
from google.cloud import firestore
import pandas as pd
import itertools
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
%matplotlib inline
db = firestore.Client.from_service_account_json("../credentials/stairway-firestore-key.json")
```
## At random
Firestore doesn't have a random query functionality, so you will have to implement a randomizer yourself. There's two ways this could be done:
1. If you know the distribution of your document ids, draw randomly from there and query documents based on their id directly.
2. Choose some feature of your documents, draw a random number from it and use it to subset the data. Then sort the result and retrieve the top k documents (or sort descending if nothing found).
### Implementation of random 1
Showstopper: can Firestore query multiple documents at once based on document id? If not, a for loop is required to fetch all destinations (=> multiple queries instead of one).
TODO: investigate!
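A minimal sketch of option 1, assuming the Firestore document ids equal the stairway destination ids (e.g. `dest['id']` from the CSV loaded below). Each id is fetched with its own `document(...).get()` call, so this costs one read per id rather than a single query, which only partially answers the TODO above.
```python
import numpy as np

def random_destinations(db, known_ids, k=3, seed=None):
    """Draw k ids at random and fetch the matching documents one by one."""
    rng = np.random.default_rng(seed)
    picked = rng.choice(list(known_ids), size=k, replace=False)
    docs = []
    for doc_id in picked:
        snap = db.collection("destinations").document(str(doc_id)).get()
        if snap.exists:  # skip ids that have no document behind them
            docs.append(snap.to_dict())
    return docs
```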
### Implementation of random 2
For this, we need to select a feature first and look at its distribution to start drawing random numbers from.
```
dest = pd.read_csv("../data/destinations.csv")
dest = dest.loc[dest['succes'] == 1] # only use OK destination data
dest.index = dest['id'] # set firestore document id equal to stairway destination id
dest.shape
```
For example, `osp_importance`:
```
feature = 'osp_importance'
# Fit a normal distribution to the data:
mu, std = norm.fit(dest[feature])
# Plot the histogram.
plt.hist(dest[feature], density=True, alpha=0.6)
# Plot the PDF.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.show()
```
Now we can randomly pick a number from this distribution:
```
np.random.normal(mu, std)
```
Which means the final querying of firestore would look as follows.
**Note:** to combine the equality operator (`==`) with a range or array-contains clause (`<, <=, >, >=, or array_contains`), make sure to create a composite index (one for each `continent` + `osp_importance`). Also, you cannot do range filters on two variables. See query docs [here](https://firebase.google.com/docs/firestore/query-data/queries).
```
query = (
db
.collection("destinations")
.where('EU', '==', 1)
.where('osp_importance', '<=', np.random.normal(mu, std))
.order_by('osp_importance', direction=firestore.Query.DESCENDING)
.limit(3)
.get()
)
for doc in itertools.islice(query, 2):
print(u'{} => {}'.format(doc.id, doc.to_dict()['name']))
```
Next steps:
- The distribution of `osp_importance` is probably highly variable per continent. Maybe fit something like this for each continent? (see the sketch after this list)
- The `<=` filter in combination with descending ordering probably causes the top `osp_importance` destinations to never be selected, even though you might want these to be selected the most. Think about whether this is desired.
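A rough sketch of the per-continent idea, assuming the destinations CSV carries the same one-hot continent flags (such as `EU`) that the Firestore documents appear to have:
```python
from scipy.stats import norm

def fit_per_continent(dest, flags=("EU",)):
    """Fit a normal distribution of osp_importance separately per continent flag.

    Assumes one-hot continent columns (EU, ...) in the dataframe, mirroring
    the 'EU' field filtered on in the Firestore query above.
    """
    return {flag: norm.fit(dest.loc[dest[flag] == 1, "osp_importance"]) for flag in flags}

# e.g. fit_per_continent(dest) -> {'EU': (mu, std)}
```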
## Ultimate query - assessing Firestore's fit
### Example Airbnb
It is quite possible that we eventually end up with a similar query as Airbnb makes:
https://www.airbnb.com/s/Barcelona--Spain/homes?refinement_paths%5B%5D=%2Fhomes
- ¤t_tab_id=home_tab
- &selected_tab_id=home_tab
- &metadata_only=false
- &version=1.7.0
- &items_per_grid=18
- &screen_size=large
- &map_toggle=true
- &search_type=unknown
- &hide_dates_and_guests_filters=false
- &ne_lat=43.97363914475397&ne_lng=5.173845810128569&sw_lat=38.69043481932856&sw_lng=-0.5720037992464313
- &zoom=7
- &search_by_map=true
- &checkin=2020-05-09
- &checkout=2020-05-10
- &adults=16
- &amenities%5B%5D=25&amenities%5B%5D=9
- &property_type_id%5B%5D=2&property_type_id%5B%5D=1
- &price_min=83&price_max=451
From here, we can distill some simple queries (`adults=16`), some range queries `price_min= & price_max=` and some array filters (`amenities= & amenities=`) - that could possibly be brought down to simple select queries.
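As a rough sketch of what that could look like in Firestore (the collection and field names below are made up for illustration, they are not Airbnb's schema), note that both price bounds sit on a single field, which is the only range combination Firestore allows:
```python
listings = (
    db.collection("listings")                  # hypothetical collection
    .where("adults", "==", 16)                 # simple equality filter
    .where("price", ">=", 83)                  # range filter, both bounds
    .where("price", "<=", 451)                 #   on the same single field
    .where("amenities", "array_contains", 25)  # at most one array-contains clause per query
    .limit(18)
    .get()
)
```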
### Own criteria
Eventually we will need to be able to query on quite some criteria ourselves:
* geolocation: `ne_lat`, `ne_lng`, `sw_lat`, `sw_lng`
- range query based on lat & lon?
- or, filter query based on geohash?
* price
- range query based on monetary amount: `budget_min`, `budget_max`
  - or, an [in](https://firebase.google.com/docs/firestore/query-data/queries#in_and_array-contains-any) query to check whether the destination budget type (like average) is one of the types requested by the user (average, and thus also cheap): `budget_class`?
* period of travel
  - in combination with other features like **weather**,
- range query of travel dates `checkin`, `checkout`,
- filter on `month` or `period` => possibly even select multiple months
* weather
- given a period or all months selected:
- range query based on temperature: `temp_min`, `temp_max`
- filter query based on weather type (sunny, rainy, cloudy, snow...) `temp_type`?
* activities
- [array contains](https://firebase.google.com/docs/firestore/query-data/queries#array_membership) query, check if user requested activities are in destination activities array: `activities`
- or multiple single filters (like airbnb): `activity`
* passport
* safety requirements
Most of these will be 'hard' filters that **must** be applied.
However, within this selection, we also want to do an **ordering** of the destinations that are most likely to be a fit with the end-user.
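A sketch of how a few of these 'hard' filters could be combined in a single Firestore query; field names such as `month`, `budget_class` and `activities` are assumptions about the schema, not existing fields:
```python
candidates = (
    db.collection("destinations")
    .where("month", "==", 7)                            # period of travel as a simple filter
    .where("budget_class", "in", ["cheap", "average"])  # at most one 'in' clause per query
    .where("activities", "array_contains", "hiking")    # array-contains can be combined with 'in'
    .limit(50)
    .get()
)
```
The ordering discussed next would then still have to happen client-side or via a precomputed score field, since a sum of feature scores computed at query time is not something Firestore's `order_by` can express.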
### Recommendation queries in old code
For recommendation queries we need to rank destinations based on some ordering. Based on the user profile and/or request, this ordering will be different and calculations will need to be done.
For example, in our old code, sorting is done by summing the feature scores of the features selected by the end-user and ordering on that sum:
query for recommendations in [`dao/bm25Recommendations.py`](https://github.com/Braamling/project_travel/blob/master/REST_API/project_travel/dao/bm25Recommendations.py)
```python
def query(self, columns, user_id, limit, offset):
dbHelper = DbHelpers()
order_by = dbHelper.build_feature_score_filter(columns)
query = "SELECT ds.destination_id FROM (`destination_scores` ds" +\
" INNER JOIN `destinations` d ON succes > 0 AND " +\
"ds.destination_id = d.id) LEFT JOIN `wishlist` w " +\
"ON d.id = w.destination_id AND w.user_id = %s " +\
"WHERE w.destination_id is NULL ORDER BY " +\
"( " + order_by + " ) DESC LIMIT %s OFFSET %s"
```
where the `order_by` constructs the combined feature score in [`dao/dbHelpers.py`](https://github.com/Braamling/project_travel/blob/master/REST_API/project_travel/dao/dbHelpers.py):
```python
def build_feature_score_filter(self, features):
valid_columns = self.get_column_names('destination_scores')
order_by = "( "
for feature in features[:-1]:
if feature in valid_columns:
order_by += feature + " + "
if features[-1] in valid_columns:
order_by += features[-1] + " )"
else:
order_by += " )"
return order_by
```
This demands a bit of flexibility of your system.
### Get requests in old code
Given that we have applied all of the above filters and got a destination id, this is how we used to retrieve the destination info:
join and receive all info from 1 destination in [`classes/destination.py`](https://github.com/Braamling/project_travel/blob/master/REST_API/project_travel/classes/destination.py)
```python
    # Join and receive all information about a destination.
query = "SELECT d.*, r.*, t.*, b.* FROM `destinations` d INNER JOIN" +\
" `rain` r ON d.id = %s AND d.temperature_id = " +\
" r.id INNER JOIN `temperatures` t ON d.temperature_id = t.id" +\
" INNER JOIN `attributes` a ON a.id = d.attr_id " +\
" INNER JOIN `budget` b on b.id = d.budget_id"
cur.execute(query, (destination_id))
```
It gets a bit more complicated if you want to take into account which destinations the user has already looked at. For example, this is how we got visited destinations in [`dao/destinations.py`](https://github.com/Braamling/project_travel/blob/master/REST_API/project_travel/dao/destinations.py)
```python
def get_visited(self, user_id):
cur = self.get_cursor()
query = "SELECT d.id, d.name, d.country_name, d.longitude, d.latitude FROM `visited` v INNER JOIN " +\
" `users` u ON u.id = %s AND u.id = v.user_id INNER JOIN " +\
"`destinations` d ON d.id = v.destination_id WHERE v.status = 'VISITED'"
cur.execute(query, (user_id))
```
This is obviously not the hardest part. But it will depend on where you store the user feedback.
### Conclusion: fit with Firestore
Firebase has a couple of limitations with regards to querying:
- You can use only one `in` or `array-contains-any` clause per query. You can't use both `in` and `array-contains-any` in the same query.
- You can combine `array-contains` with `in` but not with `array-contains-any`.
- You can only perform range comparisons (`<, <=, >, >=`) on a single field, and you can include at most one `array-contains` or `array-contains-any`clause in a compound query:
This means that if we want to continue with Firebase, we need to:
* Use **geohashing** for the 'hard' filtering based on location (see the sketch after this list)
* Assess whether retrieving all destinations from the 'hard' filter and calculating the smart stuff in the Flask app is good enough in terms of performance.
  - Compared to Bram's SQL code, which works with an offset, you will be retrieving significantly more data from the database each time Flask calls the DB. That can make it slow and expensive.
* Think about how to save user feedback (watched/likes/dislikes) and combine it in recommendation and bucketlist queries
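For the geohashing bullet, one common pattern (a sketch only, assuming every destination document stores a precomputed `geohash` string field) is to turn the bounding box into geohash prefixes and filter with a string range, which keeps the range comparison on a single field:
```python
def destinations_in_geohash(db, prefix, max_results=50):
    """Fetch destinations whose geohash starts with `prefix`."""
    return (
        db.collection("destinations")
        .where("geohash", ">=", prefix)
        .where("geohash", "<", prefix + "~")  # '~' sorts after all geohash base32 characters
        .limit(max_results)
        .get()
    )
```
A real bounding box usually maps to several neighbouring prefixes, so this query would run once per prefix and the results would be merged client-side.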
Conclusion: it is probably wise to abandon Firestore and think of an alternative.
Done.
|
github_jupyter
|
from google.cloud import firestore
import pandas as pd
import itertools
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
%matplotlib inline
db = firestore.Client.from_service_account_json("../credentials/stairway-firestore-key.json")
dest = pd.read_csv("../data/destinations.csv")
dest = dest.loc[dest['succes'] == 1] # only use OK destination data
dest.index = dest['id'] # set firestore document id equal to stairway destination id
dest.shape
feature = 'osp_importance'
# Fit a normal distribution to the data:
mu, std = norm.fit(dest[feature])
# Plot the histogram.
plt.hist(dest[feature], density=True, alpha=0.6)
# Plot the PDF.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.show()
np.random.normal(mu, std)
query = (
db
.collection("destinations")
.where('EU', '==', 1)
.where('osp_importance', '<=', np.random.normal(mu, std))
.order_by('osp_importance', direction=firestore.Query.DESCENDING)
.limit(3)
.get()
)
for doc in itertools.islice(query, 2):
print(u'{} => {}'.format(doc.id, doc.to_dict()['name']))
def query(self, columns, user_id, limit, offset):
dbHelper = DbHelpers()
order_by = dbHelper.build_feature_score_filter(columns)
query = "SELECT ds.destination_id FROM (`destination_scores` ds" +\
" INNER JOIN `destinations` d ON succes > 0 AND " +\
"ds.destination_id = d.id) LEFT JOIN `wishlist` w " +\
"ON d.id = w.destination_id AND w.user_id = %s " +\
"WHERE w.destination_id is NULL ORDER BY " +\
"( " + order_by + " ) DESC LIMIT %s OFFSET %s"
def build_feature_score_filter(self, features):
valid_columns = self.get_column_names('destination_scores')
order_by = "( "
for feature in features[:-1]:
if feature in valid_columns:
order_by += feature + " + "
if features[-1] in valid_columns:
order_by += features[-1] + " )"
else:
order_by += " )"
return order_by
    # Join and receive all information about a destination.
query = "SELECT d.*, r.*, t.*, b.* FROM `destinations` d INNER JOIN" +\
" `rain` r ON d.id = %s AND d.temperature_id = " +\
" r.id INNER JOIN `temperatures` t ON d.temperature_id = t.id" +\
" INNER JOIN `attributes` a ON a.id = d.attr_id " +\
" INNER JOIN `budget` b on b.id = d.budget_id"
cur.execute(query, (destination_id))
def get_visited(self, user_id):
cur = self.get_cursor()
query = "SELECT d.id, d.name, d.country_name, d.longitude, d.latitude FROM `visited` v INNER JOIN " +\
" `users` u ON u.id = %s AND u.id = v.user_id INNER JOIN " +\
"`destinations` d ON d.id = v.destination_id WHERE v.status = 'VISITED'"
cur.execute(query, (user_id))
| 0.481698 | 0.799481 |
```
import Numberjack
G = Numberjack.Matrix(9, 9, 1, 9, 'x')
model = Numberjack.Model()
a = model.add
visited = {}
def t(start, end, total):
results = []
r1, c1 = start
r2, c2 = end
if r1 == r2:
results = [G[r1][c] for c in range(c1, c2 + 1)]
elif c1 == c2:
results = [G[r][c1] for r in range(r1, r2 + 1)]
else:
raise Exception(start, end)
for exp in results:
visited[exp.name()] = True
model.add(Numberjack.Sum(results) == total)
model.add(Numberjack.AllDiff(results))
def r(row, start, end, total):
t((row, start), (row, end), total)
def c(col, start, end, total):
t((start, col), (end, col), total)
A = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'A')
B = Numberjack.Variable([4, 15, 16, 23, 42], 'B')
C = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'C')
D = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'D')
E = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'E')
F = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'F')
LETTERS = [A, B, C, D, E, F]
model.add(Numberjack.AllDiff(LETTERS))
r(0, 0, 1, 9) ; r(0, 5, 6, B)
r(1, 0, 3, 25) ; r(1, 5, 7, 17)
r(2, 2, 3, 10) ; r(2, 7, 8, 5)
r(3, 1, 4, 13) ; r(3, 6, 8, 6)
r(4, 1, 7, F)
r(5, 0, 2, 22) ; r(5, 4, 7, 21)
r(6, 0, 1, 6) ; r(6, 5, 6, 5)
r(7, 1, 3, 15) ; r(7, 5, 8, 11)
r(8, 2, 3, D) ; r(8, 7, 8, 9)
c(0, 0, 1, 10) ; c(0, 5, 6, 13)
c(1, 0, 1, 12) ; c(1, 3, 7, 22)
c(2, 1, 5, C) ; c(2, 7, 8, 12)
c(3, 1, 4, 28) ; c(3, 7, 8, 3)
c(4, 3, 5, A)
c(5, 0, 1, 6) ; c(5, 4, 7, 20)
c(6, 0, 1, 12) ; c(6, 3, 7, E)
c(7, 1, 5, 17) ; c(7, 7, 8, 10)
c(8, 2, 3, 3) ; c(8, 7, 8, 6)
solver = model.load('MiniSat')
solver.solve()
n = 0
unchanged = {}
extractions = []
print("is_sat", solver.is_sat())
print("is_unsat", solver.is_unsat())
def cache(name, value):
coerced = '%2d' % value
if name not in unchanged:
unchanged[name] = coerced
elif unchanged[name] != coerced:
unchanged[name] = '?'
while solver.getNextSolution():
n += 1
print('#%s' % n)
for l in LETTERS:
print(l.name(), l.get_value())
cache(l.name(), l.get_value())
for r in range(9):
row = []
for c in range(9):
name = G[r][c].name()
if name in visited:
value = G[r][c].get_value()
cache(name, value)
row.append('%2d' % value)
else:
row.append('##')
print('\t'.join(row))
print('unchanged', unchanged)
for l in LETTERS:
print(l.name(), unchanged[l.name()])
for r in range(9):
row = []
for c in range(9):
name = G[r][c].name()
if name in visited:
row.append(unchanged[name])
else:
row.append('##')
print('\t'.join(row))
print(unchanged)
```
|
github_jupyter
|
import Numberjack
G = Numberjack.Matrix(9, 9, 1, 9, 'x')
model = Numberjack.Model()
a = model.add
visited = {}
def t(start, end, total):
results = []
r1, c1 = start
r2, c2 = end
if r1 == r2:
results = [G[r1][c] for c in range(c1, c2 + 1)]
elif c1 == c2:
results = [G[r][c1] for r in range(r1, r2 + 1)]
else:
raise Exception(start, end)
for exp in results:
visited[exp.name()] = True
model.add(Numberjack.Sum(results) == total)
model.add(Numberjack.AllDiff(results))
def r(row, start, end, total):
t((row, start), (row, end), total)
def c(col, start, end, total):
t((start, col), (end, col), total)
A = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'A')
B = Numberjack.Variable([4, 15, 16, 23, 42], 'B')
C = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'C')
D = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'D')
E = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'E')
F = Numberjack.Variable([4, 8, 15, 16, 23, 42], 'F')
LETTERS = [A, B, C, D, E, F]
model.add(Numberjack.AllDiff(LETTERS))
r(0, 0, 1, 9) ; r(0, 5, 6, B)
r(1, 0, 3, 25) ; r(1, 5, 7, 17)
r(2, 2, 3, 10) ; r(2, 7, 8, 5)
r(3, 1, 4, 13) ; r(3, 6, 8, 6)
r(4, 1, 7, F)
r(5, 0, 2, 22) ; r(5, 4, 7, 21)
r(6, 0, 1, 6) ; r(6, 5, 6, 5)
r(7, 1, 3, 15) ; r(7, 5, 8, 11)
r(8, 2, 3, D) ; r(8, 7, 8, 9)
c(0, 0, 1, 10) ; c(0, 5, 6, 13)
c(1, 0, 1, 12) ; c(1, 3, 7, 22)
c(2, 1, 5, C) ; c(2, 7, 8, 12)
c(3, 1, 4, 28) ; c(3, 7, 8, 3)
c(4, 3, 5, A)
c(5, 0, 1, 6) ; c(5, 4, 7, 20)
c(6, 0, 1, 12) ; c(6, 3, 7, E)
c(7, 1, 5, 17) ; c(7, 7, 8, 10)
c(8, 2, 3, 3) ; c(8, 7, 8, 6)
solver = model.load('MiniSat')
solver.solve()
n = 0
unchanged = {}
extractions = []
print("is_sat", solver.is_sat())
print("is_unsat", solver.is_unsat())
def cache(name, value):
coerced = '%2d' % value
if name not in unchanged:
unchanged[name] = coerced
elif unchanged[name] != coerced:
unchanged[name] = '?'
while solver.getNextSolution():
n += 1
print('#%s' % n)
for l in LETTERS:
print(l.name(), l.get_value())
cache(l.name(), l.get_value())
for r in range(9):
row = []
for c in range(9):
name = G[r][c].name()
if name in visited:
value = G[r][c].get_value()
cache(name, value)
row.append('%2d' % value)
else:
row.append('##')
print('\t'.join(row))
print('unchanged', unchanged)
for l in LETTERS:
print(l.name(), unchanged[l.name()])
for r in range(9):
row = []
for c in range(9):
name = G[r][c].name()
if name in visited:
row.append(unchanged[name])
else:
row.append('##')
print('\t'.join(row))
print(unchanged)
| 0.161684 | 0.404037 |
# R to Python: porting `orto_request`
```
orto_request = function(polygon) {
if (nrow(polygon) == 0) {
stop("no geometries")
}
selected_cols = c("godlo", "akt_rok", "piksel", "kolor", "zrDanych", "ukladXY",
"czy_ark_wypelniony", "url_do_pobrania", "idSerie", "sha1",
"nazwa_pliku")
selected_cols = paste(selected_cols, collapse = ",")
epsg = sf::st_crs(polygon)$epsg
# hard coded URL and parameters
base_URL = "https://mapy.geoportal.gov.pl/gprest/services/SkorowidzeFOTOMF/MapServer/0/query?"
geometryType = "&geometryType=esriGeometryEnvelope"
spatialRel = "&spatialRel=esriSpatialRelIntersects"
outFields = paste0("&outFields=", selected_cols)
returnGeometry = "&returnGeometry=false"
file = "&f=json"
# initial empty df (columns must be identical as in 'selected_cols')
empty_df = data.frame(godlo = character(),
akt_rok = integer(),
piksel = numeric(),
kolor = character(),
zrDanych = character(),
ukladXY = character(),
#modulArch = character(),
#nrZglosz = character(),
czy_ark_wypelniony = character(),
#daneAktualne = integer(),
#daneAktDo10cm = integer(),
#lok_orto = character(),
url_do_pobrania = character(),
idSerie = integer(),
sha1 = character(),
#akt_data = numeric(),
#idorto = integer(),
nazwa_pliku = character()
#ESRI_OID = integer()
)
for (i in seq_len(nrow(polygon))) {
bbox = sf::st_bbox(sf::st_geometry(polygon)[[i]])
# user input
geometry = paste0("geometry={'xmin':", bbox[1], ", 'ymin':", bbox[2], ", ",
"'xmax':", bbox[3], ", 'ymax':", bbox[4], ", ",
"'spatialReference':{'wkid':", epsg, "}}")
prepared_URL = paste0(base_URL, geometry, geometryType, spatialRel, outFields,
returnGeometry, file)
output = jsonlite::fromJSON(prepared_URL)
output = output$features[[1]]
# MaxRecordCount: 1000
if (nrow(output) == 1000) {
warning("maximum number of records, reduce the area")
}
empty_df = rbind(empty_df, output)
}
# remove duplicated images
empty_df = empty_df[!duplicated(empty_df$nazwa_pliku), ]
# postprocessing
colnames(empty_df) = c("sheetID", "year", "resolution", "composition",
"sensor", "CRS", "isFilled", "URL", "seriesID",
"sha1", "filename")
empty_df$composition = as.factor(empty_df$composition)
empty_df$CRS = as.factor(empty_df$CRS)
empty_df$isFilled = ifelse(empty_df$isFilled == "TAK", TRUE, FALSE)
empty_df$sensor = factor(empty_df$sensor,
levels = c("Scena sat.", "Zdj. analogowe", "Zdj. cyfrowe"),
labels = c("Satellite", "Analog", "Digital"))
return(empty_df)
}
```
```
import requests
def orto_request(polygon):
if len(polygon)==0:
print("no geometries")
return
selected_cols_list = ["godlo", "akt_rok", "piksel", "kolor", "zrDanych", "ukladXY",
"czy_ark_wypelniony", "url_do_pobrania", "idSerie", "sha1",
"nazwa_pliku"]
selected_cols = ','.join(selected_cols_list)
#epsg = sf::st_crs(polygon)$epsg
epsg="not-implemented-yet"
# hard coded URL and parameters
base_URL = "https://mapy.geoportal.gov.pl/gprest/services/SkorowidzeFOTOMF/MapServer/0/query?"
geometryType = "&geometryType=esriGeometryEnvelope"
spatialRel = "&spatialRel=esriSpatialRelIntersects"
outFields = "&outFields="+selected_cols
returnGeometry = "&returnGeometry=false"
file = "&f=json"
# # initial empty df (columns must be identical as in 'selected_cols')
# empty_df = data.frame(godlo = character(),
# akt_rok = integer(),
# piksel = numeric(),
# kolor = character(),
# zrDanych = character(),
# ukladXY = character(),
# #modulArch = character(),
# #nrZglosz = character(),
# czy_ark_wypelniony = character(),
# #daneAktualne = integer(),
# #daneAktDo10cm = integer(),
# #lok_orto = character(),
# url_do_pobrania = character(),
# idSerie = integer(),
# sha1 = character(),
# #akt_data = numeric(),
# #idorto = integer(),
# nazwa_pliku = character()
# #ESRI_OID = integer()
# )
results=[]
for i in range(len(polygon)):
# bbox = sf::st_bbox(sf::st_geometry(polygon)[[i]])
bbox = polygon[i]
# user input
geometry = ''.join([str(_) for _ in ["geometry={'xmin':", bbox[0], ", 'ymin':", bbox[1], ", ",
"'xmax':", bbox[2], ", 'ymax':", bbox[3], ", ",
"'spatialReference':{'wkid':", epsg, "}}"]])
prepared_URL = ''.join([base_URL, geometry, geometryType, spatialRel, outFields,
returnGeometry, file])
print("prepared_URL:",prepared_URL)
r = requests.get(prepared_URL)
print(r.status_code)
if r.status_code==200:
print(r.headers['content-type'])
print(r.encoding)
print(r.text)
print(r.json())
results.append(r.json())
# output = jsonlite::fromJSON(prepared_URL)
# output = output$features[[1]]
# # MaxRecordCount: 1000
# if (nrow(output) == 1000) {
# warning("maximum number of records, reduce the area")
# }
# empty_df = rbind(empty_df, output)
# }
# # remove duplicated images
# empty_df = empty_df[!duplicated(empty_df$nazwa_pliku), ]
# # postprocessing
# colnames(empty_df) = c("sheetID", "year", "resolution", "composition",
# "sensor", "CRS", "isFilled", "URL", "seriesID",
# "sha1", "filename")
# empty_df$composition = as.factor(empty_df$composition)
# empty_df$CRS = as.factor(empty_df$CRS)
# empty_df$isFilled = ifelse(empty_df$isFilled == "TAK", TRUE, FALSE)
# empty_df$sensor = factor(empty_df$sensor,
# levels = c("Scena sat.", "Zdj. analogowe", "Zdj. cyfrowe"),
# labels = c("Satellite", "Analog", "Digital"))
# return(empty_df)
return results
orto_request([[0,1,0,1],])
```
|
github_jupyter
|
orto_request = function(polygon) {
if (nrow(polygon) == 0) {
stop("no geometries")
}
selected_cols = c("godlo", "akt_rok", "piksel", "kolor", "zrDanych", "ukladXY",
"czy_ark_wypelniony", "url_do_pobrania", "idSerie", "sha1",
"nazwa_pliku")
selected_cols = paste(selected_cols, collapse = ",")
epsg = sf::st_crs(polygon)$epsg
# hard coded URL and parameters
base_URL = "https://mapy.geoportal.gov.pl/gprest/services/SkorowidzeFOTOMF/MapServer/0/query?"
geometryType = "&geometryType=esriGeometryEnvelope"
spatialRel = "&spatialRel=esriSpatialRelIntersects"
outFields = paste0("&outFields=", selected_cols)
returnGeometry = "&returnGeometry=false"
file = "&f=json"
# initial empty df (columns must be identical as in 'selected_cols')
empty_df = data.frame(godlo = character(),
akt_rok = integer(),
piksel = numeric(),
kolor = character(),
zrDanych = character(),
ukladXY = character(),
#modulArch = character(),
#nrZglosz = character(),
czy_ark_wypelniony = character(),
#daneAktualne = integer(),
#daneAktDo10cm = integer(),
#lok_orto = character(),
url_do_pobrania = character(),
idSerie = integer(),
sha1 = character(),
#akt_data = numeric(),
#idorto = integer(),
nazwa_pliku = character()
#ESRI_OID = integer()
)
for (i in seq_len(nrow(polygon))) {
bbox = sf::st_bbox(sf::st_geometry(polygon)[[i]])
# user input
geometry = paste0("geometry={'xmin':", bbox[1], ", 'ymin':", bbox[2], ", ",
"'xmax':", bbox[3], ", 'ymax':", bbox[4], ", ",
"'spatialReference':{'wkid':", epsg, "}}")
prepared_URL = paste0(base_URL, geometry, geometryType, spatialRel, outFields,
returnGeometry, file)
output = jsonlite::fromJSON(prepared_URL)
output = output$features[[1]]
# MaxRecordCount: 1000
if (nrow(output) == 1000) {
warning("maximum number of records, reduce the area")
}
empty_df = rbind(empty_df, output)
}
# remove duplicated images
empty_df = empty_df[!duplicated(empty_df$nazwa_pliku), ]
# postprocessing
colnames(empty_df) = c("sheetID", "year", "resolution", "composition",
"sensor", "CRS", "isFilled", "URL", "seriesID",
"sha1", "filename")
empty_df$composition = as.factor(empty_df$composition)
empty_df$CRS = as.factor(empty_df$CRS)
empty_df$isFilled = ifelse(empty_df$isFilled == "TAK", TRUE, FALSE)
empty_df$sensor = factor(empty_df$sensor,
levels = c("Scena sat.", "Zdj. analogowe", "Zdj. cyfrowe"),
labels = c("Satellite", "Analog", "Digital"))
return(empty_df)
}
import requests
def orto_request(polygon):
if len(polygon)==0:
print("no geometries")
return
selected_cols_list = ["godlo", "akt_rok", "piksel", "kolor", "zrDanych", "ukladXY",
"czy_ark_wypelniony", "url_do_pobrania", "idSerie", "sha1",
"nazwa_pliku"]
selected_cols = ','.join(selected_cols_list)
#epsg = sf::st_crs(polygon)$epsg
epsg="not-implemented-yet"
# hard coded URL and parameters
base_URL = "https://mapy.geoportal.gov.pl/gprest/services/SkorowidzeFOTOMF/MapServer/0/query?"
geometryType = "&geometryType=esriGeometryEnvelope"
spatialRel = "&spatialRel=esriSpatialRelIntersects"
outFields = "&outFields="+selected_cols
returnGeometry = "&returnGeometry=false"
file = "&f=json"
# # initial empty df (columns must be identical as in 'selected_cols')
# empty_df = data.frame(godlo = character(),
# akt_rok = integer(),
# piksel = numeric(),
# kolor = character(),
# zrDanych = character(),
# ukladXY = character(),
# #modulArch = character(),
# #nrZglosz = character(),
# czy_ark_wypelniony = character(),
# #daneAktualne = integer(),
# #daneAktDo10cm = integer(),
# #lok_orto = character(),
# url_do_pobrania = character(),
# idSerie = integer(),
# sha1 = character(),
# #akt_data = numeric(),
# #idorto = integer(),
# nazwa_pliku = character()
# #ESRI_OID = integer()
# )
results=[]
for i in range(len(polygon)):
# bbox = sf::st_bbox(sf::st_geometry(polygon)[[i]])
bbox = polygon[i]
# user input
geometry = ''.join([str(_) for _ in ["geometry={'xmin':", bbox[0], ", 'ymin':", bbox[1], ", ",
"'xmax':", bbox[2], ", 'ymax':", bbox[3], ", ",
"'spatialReference':{'wkid':", epsg, "}}"]])
prepared_URL = ''.join([base_URL, geometry, geometryType, spatialRel, outFields,
returnGeometry, file])
print("prepared_URL:",prepared_URL)
r = requests.get(prepared_URL)
print(r.status_code)
if r.status_code==200:
print(r.headers['content-type'])
print(r.encoding)
print(r.text)
print(r.json())
results.append(r.json())
# output = jsonlite::fromJSON(prepared_URL)
# output = output$features[[1]]
# # MaxRecordCount: 1000
# if (nrow(output) == 1000) {
# warning("maximum number of records, reduce the area")
# }
# empty_df = rbind(empty_df, output)
# }
# # remove duplicated images
# empty_df = empty_df[!duplicated(empty_df$nazwa_pliku), ]
# # postprocessing
# colnames(empty_df) = c("sheetID", "year", "resolution", "composition",
# "sensor", "CRS", "isFilled", "URL", "seriesID",
# "sha1", "filename")
# empty_df$composition = as.factor(empty_df$composition)
# empty_df$CRS = as.factor(empty_df$CRS)
# empty_df$isFilled = ifelse(empty_df$isFilled == "TAK", TRUE, FALSE)
# empty_df$sensor = factor(empty_df$sensor,
# levels = c("Scena sat.", "Zdj. analogowe", "Zdj. cyfrowe"),
# labels = c("Satellite", "Analog", "Digital"))
# return(empty_df)
return results
orto_request([[0,1,0,1],])
| 0.339171 | 0.69485 |
## Scaling to Minimum and Maximum values - MinMaxScaling
Minimum and maximum scaling squeezes the values between 0 and 1. It subtracts the minimum value from all the observations and then divides the result by the value range:
X_scaled = (X - X.min) / (X.max - X.min)
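As a quick sanity check, the same formula can be written out by hand with pandas on toy data (this is an extra illustration, not part of the scikit-learn flow below):
```python
import pandas as pd

X = pd.DataFrame({"a": [1.0, 5.0, 9.0], "b": [10.0, 20.0, 40.0]})
X_scaled = (X - X.min()) / (X.max() - X.min())
print(X_scaled)  # every column now spans exactly [0, 1]
```
The `MinMaxScaler` used below learns exactly these per-column minima and ranges from the train set.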
```
import pandas as pd
# dataset for the demo
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# the scaler - for min-max scaling
from sklearn.preprocessing import MinMaxScaler
# load the Boston House price data
# this is how we load the boston dataset from sklearn
boston_dataset = load_boston()
# create a dataframe with the independent variables
data = pd.DataFrame(boston_dataset.data,
columns=boston_dataset.feature_names)
# add target
data['MEDV'] = boston_dataset.target
data.head()
# let's separate the data into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data.drop('MEDV', axis=1),
data['MEDV'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# set up the scaler
scaler = MinMaxScaler()
# fit the scaler to the train set, it will learn the parameters
scaler.fit(X_train)
# transform train and test sets
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# the scaler stores the maximum values of the features, learned from train set
scaler.data_max_
# the scaler stores the minimum values of the features, learned from train set
scaler.min_
# the scaler also stores the value range (max - min)
scaler.data_range_
# let's transform the returned NumPy arrays to dataframes
X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)
# additional bit not in book
import matplotlib.pyplot as plt
import seaborn as sns
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['RM'], ax=ax1)
sns.kdeplot(X_train['LSTAT'], ax=ax1)
sns.kdeplot(X_train['CRIM'], ax=ax1)
# after scaling
ax2.set_title('After Min-Max Scaling')
sns.kdeplot(X_train_scaled['RM'], ax=ax2)
sns.kdeplot(X_train_scaled['LSTAT'], ax=ax2)
sns.kdeplot(X_train_scaled['CRIM'], ax=ax2)
plt.show()
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['AGE'], ax=ax1)
sns.kdeplot(X_train['DIS'], ax=ax1)
sns.kdeplot(X_train['NOX'], ax=ax1)
# after scaling
ax2.set_title('After Min-Max Scaling')
sns.kdeplot(X_train_scaled['AGE'], ax=ax2)
sns.kdeplot(X_train_scaled['DIS'], ax=ax2)
sns.kdeplot(X_train_scaled['NOX'], ax=ax2)
plt.show()
```
|
github_jupyter
|
import pandas as pd
# dataset for the demo
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# the scaler - for min-max scaling
from sklearn.preprocessing import MinMaxScaler
# load the Boston House price data
# this is how we load the boston dataset from sklearn
boston_dataset = load_boston()
# create a dataframe with the independent variables
data = pd.DataFrame(boston_dataset.data,
columns=boston_dataset.feature_names)
# add target
data['MEDV'] = boston_dataset.target
data.head()
# let's separate the data into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data.drop('MEDV', axis=1),
data['MEDV'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# set up the scaler
scaler = MinMaxScaler()
# fit the scaler to the train set, it will learn the parameters
scaler.fit(X_train)
# transform train and test sets
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# the scaler stores the maximum values of the features, learned from train set
scaler.data_max_
# the scaler stores the minimum values of the features, learned from train set
scaler.min_
# the scaler also stores the value range (max - min)
scaler.data_range_
# let's transform the returned NumPy arrays to dataframes
X_train_scaled = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_scaled = pd.DataFrame(X_test_scaled, columns=X_test.columns)
# additional bit not in book
import matplotlib.pyplot as plt
import seaborn as sns
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['RM'], ax=ax1)
sns.kdeplot(X_train['LSTAT'], ax=ax1)
sns.kdeplot(X_train['CRIM'], ax=ax1)
# after scaling
ax2.set_title('After Min-Max Scaling')
sns.kdeplot(X_train_scaled['RM'], ax=ax2)
sns.kdeplot(X_train_scaled['LSTAT'], ax=ax2)
sns.kdeplot(X_train_scaled['CRIM'], ax=ax2)
plt.show()
# let's compare the variable distributions before and after scaling
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# before scaling
ax1.set_title('Before Scaling')
sns.kdeplot(X_train['AGE'], ax=ax1)
sns.kdeplot(X_train['DIS'], ax=ax1)
sns.kdeplot(X_train['NOX'], ax=ax1)
# after scaling
ax2.set_title('After Min-Max Scaling')
sns.kdeplot(X_train_scaled['AGE'], ax=ax2)
sns.kdeplot(X_train_scaled['DIS'], ax=ax2)
sns.kdeplot(X_train_scaled['NOX'], ax=ax2)
plt.show()
| 0.785761 | 0.977001 |
```
import en_core_web_sm
spacy_nlp = en_core_web_sm.load()
article = '''
Asian shares skidded on Tuesday after a rout in tech stocks put Wall Street to the sword, while a
sharp drop in oil prices and political risks in Europe pushed the dollar to 16-month highs as investors dumped
riskier assets. MSCI’s broadest index of Asia-Pacific shares outside Japan dropped 1.7 percent to a 1-1/2
week trough, with Australian shares sinking 1.6 percent. Japan’s Nikkei dived 3.1 percent led by losses in
electric machinery makers and suppliers of Apple’s iphone parts. Sterling fell to $1.286 after three straight
sessions of losses took it to the lowest since Nov.1 as there were still considerable unresolved issues with the
European Union over Brexit, British Prime Minister Theresa May said on Monday.'''
document = spacy_nlp(article)
# print token, dependency, POS tag
for tok in document:
print(tok.text, "-->",tok.dep_,"-->", tok.pos_)
for element in document.ents:
print('Type: %s, Value: %s' % (element.label_, element))
def subtree_matcher(doc):
x = ''
y = ''
# iterate through all the tokens in the input sentence
for i,tok in enumerate(doc):
# extract subject
if tok.dep_.find("subjpass") == True:
y = tok.text
# extract object
if tok.dep_.endswith("obj") == True:
x = tok.text
return x,y
subtree_matcher(document)
#define the pattern
from spacy.matcher import Matcher
matcher = Matcher(spacy_nlp.vocab)
pattern = [
{'POS':'NOUN','OP':"?"},
{'POS':'PRON','OP':"?"},
{'POS':'PROPN','OP':"?"},
{'POS':'VERB'},
{'DEP':'nummod','OP':"?"},
{'DEP':'nummod'}
]
matcher.add("matching_1", None, pattern)
matches = matcher(document)
span = document[matches[0][1]:matches[0][2]]
print(span.text)
#define the pattern
from spacy.matcher import Matcher
matcher = Matcher(spacy_nlp.vocab)
pattern = [
{'POS':'NOUN'},
{'DEP':'nummod','OP':"?"}
{'POS':'VERB'},
{'DEP':'nummod','OP':"?"}
]
matcher.add("matching_1", None, pattern)
matches = matcher(document)
span = document[matches[0][1]:matches[0][2]]
print(span.text)
```
## Abhishek's code
```
import pandas as pd
import numpy as np
import os
import en_core_web_sm
spacy_nlp = en_core_web_sm.load()
os.getcwd()
import pandas as pd
import numpy as np
import os
import en_core_web_sm
spacy_nlp = en_core_web_sm.load()
os.getcwd()
#os.chdir('C:\\Users\\abhishekpandey\\Desktop\\Hack19')
text_file = open('text_block.txt', 'r')
article_text = text_file.read()
text_file.close()
article_document = spacy_nlp(article_text)
elements_in_text = []
for element in article_document.ents:
print('Type: %s, Value: %s' % (element.label_, element))
temp_list = []
temp_list.append(element.label_)
temp_list.append(element)
elements_in_text.append(temp_list)
from spacy.matcher import Matcher
matcher = Matcher(spacy_nlp.vocab)
pattern = [ {'POS':'NOUN'},
{'POS':'VERB','OP':"?"},
{'POS':'NOUN'}
]
matcher.add("matching_1", None, pattern)
matches = matcher(article_document)
span = article_document[matches[0][1]:matches[0][2]]
print(span.text)
elements_is_speech = pd.DataFrame(elements_in_text)
elements_is_speech
```
|
github_jupyter
|
import en_core_web_sm
spacy_nlp = en_core_web_sm.load()
article = '''
Asian shares skidded on Tuesday after a rout in tech stocks put Wall Street to the sword, while a
sharp drop in oil prices and political risks in Europe pushed the dollar to 16-month highs as investors dumped
riskier assets. MSCI’s broadest index of Asia-Pacific shares outside Japan dropped 1.7 percent to a 1-1/2
week trough, with Australian shares sinking 1.6 percent. Japan’s Nikkei dived 3.1 percent led by losses in
electric machinery makers and suppliers of Apple’s iphone parts. Sterling fell to $1.286 after three straight
sessions of losses took it to the lowest since Nov.1 as there were still considerable unresolved issues with the
European Union over Brexit, British Prime Minister Theresa May said on Monday.'''
document = spacy_nlp(article)
# print token, dependency, POS tag
for tok in document:
print(tok.text, "-->",tok.dep_,"-->", tok.pos_)
for element in document.ents:
print('Type: %s, Value: %s' % (element.label_, element))
def subtree_matcher(doc):
x = ''
y = ''
# iterate through all the tokens in the input sentence
for i,tok in enumerate(doc):
# extract subject
if tok.dep_.find("subjpass") == True:
y = tok.text
# extract object
if tok.dep_.endswith("obj") == True:
x = tok.text
return x,y
subtree_matcher(document)
#define the pattern
from spacy.matcher import Matcher
matcher = Matcher(spacy_nlp.vocab)
pattern = [
{'POS':'NOUN','OP':"?"},
{'POS':'PRON','OP':"?"},
{'POS':'PROPN','OP':"?"},
{'POS':'VERB'},
{'DEP':'nummod','OP':"?"},
{'DEP':'nummod'}
]
matcher.add("matching_1", None, pattern)
matches = matcher(document)
span = document[matches[0][1]:matches[0][2]]
print(span.text)
#define the pattern
from spacy.matcher import Matcher
matcher = Matcher(spacy_nlp.vocab)
pattern = [
{'POS':'NOUN'},
{'DEP':'nummod','OP':"?"}
{'POS':'VERB'},
{'DEP':'nummod','OP':"?"}
]
matcher.add("matching_1", None, pattern)
matches = matcher(document)
span = document[matches[0][1]:matches[0][2]]
print(span.text)
import pandas as pd
import numpy as np
import os
import en_core_web_sm
spacy_nlp = en_core_web_sm.load()
os.getcwd()
import pandas as pd
import numpy as np
import os
import en_core_web_sm
spacy_nlp = en_core_web_sm.load()
os.getcwd()
#os.chdir('C:\\Users\\abhishekpandey\\Desktop\\Hack19')
text_file = open('text_block.txt', 'r')
article_text = text_file.read()
text_file.close()
article_document = spacy_nlp(article_text)
elements_in_text = []
for element in article_document.ents:
print('Type: %s, Value: %s' % (element.label_, element))
temp_list = []
temp_list.append(element.label_)
temp_list.append(element)
elements_in_text.append(temp_list)
from spacy.matcher import Matcher
matcher = Matcher(spacy_nlp.vocab)
pattern = [ {'POS':'NOUN'},
{'POS':'VERB','OP':"?"},
{'POS':'NOUN'}
]
matcher.add("matching_1", None, pattern)
matches = matcher(article_document)
span = article_document[matches[0][1]:matches[0][2]]
print(span.text)
elements_is_speech = pd.DataFrame(elements_in_text)
elements_is_speech
| 0.088497 | 0.485966 |
```
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# Read in the data
df=pd.read_csv('crypto_data.csv')
df.head()
# Filter the DF to only the currency that is currently being traded.
IsTrading_df=df[df['IsTrading']==True]
IsTrading_df.head()
# Drop IsTrading Column
IsTrading_df=IsTrading_df.drop('IsTrading', 1).drop('Unnamed: 0', 1)
IsTrading_df
IsTrading_df.isnull().sum()
#Drop null values
IsTrading_df=IsTrading_df.dropna()
IsTrading_df
#Filter mined coins
IsTrading_df = IsTrading_df[IsTrading_df['TotalCoinsMined'] > 0]
IsTrading_df
#Save and Drop CoinName column
CoinName=pd.DataFrame(IsTrading_df['CoinName']).reset_index(drop=True)
CoinName
IsTrading_df=IsTrading_df.drop('CoinName', 1)
IsTrading_df
ColumnsToBeEncoded=['Algorithm', 'ProofType']
for column in ColumnsToBeEncoded:
le=LabelEncoder()
IsTrading_df[column] = le.fit_transform(IsTrading_df[column].values)
IsTrading_df
# Standardize the data using StandardScaler.
scaler = StandardScaler()
scaled_IsTrading = scaler.fit_transform(IsTrading_df)
print(scaled_IsTrading[0:5])
# Applying PCA to reduce dimensions to 90% of the explained variance.
# Initialize PCA model
pca =PCA(n_components=0.90)
# Get principal components of the data.
crypto_pca = pca.fit_transform(scaled_IsTrading)
# Transform PCA data to a DataFrame
df_crypto_pca = pd.DataFrame(
    data=crypto_pca, columns=["principal component 1", "principal component 2", "principal component 3"]
)
df_crypto_pca.head()
# Fetch the explained variance.
pca.explained_variance_ratio_
# Perform t-SNE on the PCA data
tsne = TSNE(learning_rate = 100)
transformed_crypto = tsne.fit_transform(crypto_pca)
transformed_crypto[0]
# Create scatter plot from t-SNE data
x = transformed_crypto[:,0]
y = transformed_crypto[:,1]
plt.scatter(x, y)
plt.show()
# Identify best number of clusters using elbow curve
inertia = []
k = list(range(1, 11))
# Calculate the inertia for the range of k values
for i in k:
km = KMeans(n_clusters=i, random_state=0)
km.fit(transformed_crypto)
inertia.append(km.inertia_)
# Create the Elbow Curve using hvPlot
elbow_data = {"k": k, "inertia": inertia}
df_elbow = pd.DataFrame(elbow_data)
df_elbow.head()
# Plot the elbow curve to find the best candidate(s) for k
plt.plot(df_elbow['k'], df_elbow['inertia'])
plt.xticks(range(1,11))
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.title('Elbow curve for customer data')
plt.show()
transform_crypto_df=pd.DataFrame(transformed_crypto).reset_index(drop=True)
transform_crypto_df
# Initialize the K-Means model.
model = KMeans(n_clusters=4, random_state=0)
# Fit the model
model.fit(transformed_crypto)
# Predict clusters
predictions = model.predict(transformed_crypto)
# Add class column to the DF
transform_crypto_df['class']=model.labels_
predictions
transform_crypto_df
clustered_df=pd.concat([CoinName, transform_crypto_df],axis=1, sort=False)
# clustered_df=CoinName_df.join(transform_crypto_df, how='outer')
print(clustered_df.shape)
clustered_df
plt.scatter(clustered_df[0], clustered_df[1], c=clustered_df['class'])
plt.show()
cluster1=clustered_df[clustered_df['class']==0]
cluster1
cluster1.to_csv("portfolio/cryptocurrencyPortfolio1.csv")
cluster2=clustered_df[clustered_df['class']==1]
cluster2
cluster2.to_csv("portfolio/cryptocurrencyPortfolio2.csv")
cluster3=clustered_df[clustered_df['class']==2]
cluster3
cluster3.to_csv("portfolio/cryptocurrencyPortfolio3.csv")
cluster4=clustered_df[clustered_df['class']==3]
cluster4
cluster4.to_csv("portfolio/cryptocurrencyPortfolio4.csv")
```
|
github_jupyter
|
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# Read in the data
df=pd.read_csv('crypto_data.csv')
df.head()
# Filter the DF to only the currency that is currently being traded.
IsTrading_df=df[df['IsTrading']==True]
IsTrading_df.head()
# Drop IsTrading Column
IsTrading_df=IsTrading_df.drop('IsTrading', 1).drop('Unnamed: 0', 1)
IsTrading_df
IsTrading_df.isnull().sum()
#Drop null values
IsTrading_df=IsTrading_df.dropna()
IsTrading_df
#Filter mined coins
IsTrading_df = IsTrading_df[IsTrading_df['TotalCoinsMined'] > 0]
IsTrading_df
#Save and Drop CoinName column
CoinName=pd.DataFrame(IsTrading_df['CoinName']).reset_index(drop=True)
CoinName
IsTrading_df=IsTrading_df.drop('CoinName', 1)
IsTrading_df
ColumnsToBeEncoded=['Algorithm', 'ProofType']
for column in ColumnsToBeEncoded:
le=LabelEncoder()
IsTrading_df[column] = le.fit_transform(IsTrading_df[column].values)
IsTrading_df
# Standardize the data using StandardScaler.
scaler = StandardScaler()
scaled_IsTrading = scaler.fit_transform(IsTrading_df)
print(scaled_IsTrading[0:5])
# Applying PCA to reduce dimensions to 90% of the explained variance.
# Initialize PCA model
pca =PCA(n_components=0.90)
# Get principal components of the data.
crypto_pca = pca.fit_transform(scaled_IsTrading)
# Transform PCA data to a DataFrame
df_crypto_pca = pd.DataFrame(
    data=crypto_pca, columns=["principal component 1", "principal component 2", "principal component 3"]
)
df_crypto_pca.head()
# Fetch the explained variance.
pca.explained_variance_ratio_
# Perform t-SNE on the PCA data
tsne = TSNE(learning_rate = 100)
transformed_crypto = tsne.fit_transform(crypto_pca)
transformed_crypto[0]
# Create scatter plot from t-SNE data
x = transformed_crypto[:,0]
y = transformed_crypto[:,1]
plt.scatter(x, y)
plt.show()
# Identify best number of clusters using elbow curve
inertia = []
k = list(range(1, 11))
# Calculate the inertia for the range of k values
for i in k:
km = KMeans(n_clusters=i, random_state=0)
km.fit(transformed_crypto)
inertia.append(km.inertia_)
# Create the Elbow Curve using hvPlot
elbow_data = {"k": k, "inertia": inertia}
df_elbow = pd.DataFrame(elbow_data)
df_elbow.head()
# Plot the elbow curve to find the best candidate(s) for k
plt.plot(df_elbow['k'], df_elbow['inertia'])
plt.xticks(range(1,11))
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.title('Elbow curve for customer data')
plt.show()
transform_crypto_df=pd.DataFrame(transformed_crypto).reset_index(drop=True)
transform_crypto_df
# Initialize the K-Means model.
model = KMeans(n_clusters=4, random_state=0)
# Fit the model
model.fit(transformed_crypto)
# Predict clusters
predictions = model.predict(transformed_crypto)
# Add class column to the DF
transform_crypto_df['class']=model.labels_
predictions
transform_crypto_df
clustered_df=pd.concat([CoinName, transform_crypto_df],axis=1, sort=False)
# clustered_df=CoinName_df.join(transform_crypto_df, how='outer')
print(clustered_df.shape)
clustered_df
plt.scatter(clustered_df[0], clustered_df[1], c=clustered_df['class'])
plt.show()
cluster1=clustered_df[clustered_df['class']==0]
cluster1
cluster1.to_csv("portfolio/cryptocurrencyPortfolio1.csv")
cluster2=clustered_df[clustered_df['class']==1]
cluster2
cluster2.to_csv("portfolio/cryptocurrencyPortfolio2.csv")
cluster3=clustered_df[clustered_df['class']==2]
cluster3
cluster3.to_csv("portfolio/cryptocurrencyPortfolio3.csv")
cluster4=clustered_df[clustered_df['class']==3]
cluster4
cluster4.to_csv("portfolio/cryptocurrencyPortfolio4.csv")
```
%matplotlib notebook
import control as c
import ipywidgets as w
import numpy as np
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import matplotlib.animation as animation
display(HTML('<script> $(document).ready(function() { $("div.input").hide(); }); </script>'))
```
## PID Control of an Undamped or Critically Damped Second-Order System
In the following example, we will use a PID controller (or one of its subtypes) to control an undamped or critically damped second-order LTI (Linear Time-Invariant) system.
Both conditions share the property that either the real or the imaginary component of their pole pair is zero. We define their models using the non-zero components.
<br>
$$G_{un}(s)=\frac{1}{s^2+A^2}\qquad\qquad G_{crit}(s)=\frac{1}{s^2+2As+A^2}$$
<br>
Typical examples of series RLC circuits:
<br><br>
<table><tbody><tr>
<td><center><img src="Images/second_order_undamped.png" width="60%" /></center></td>
<td><center><img src="Images/second_order_adjustable.png" width="70%" /></center></td>
</tr>
<tr>
<td><center>Undamped system</center></td><td><center>Critically damped system $\left(R=2\sqrt{\frac{L}{C}}\right)$</center></td>
</tr></tbody></table>
<br>
Where the transfer functions can be expressed as:
<br>
$$G_{u}(s)=\frac{1}{LCs^2+1} \qquad\qquad G_{c}(s)=K\frac{1}{s^2+2\omega_0 s+\omega_0^2}=\frac{1}{LC}\frac{1}{s^2+\frac{R}{L}s+\frac{1}{LC}} \qquad \omega_0=\frac{R}{2L}=\frac{1}{\sqrt{LC}}$$
<br>
<b>Choose a damping type and a value for the poles!</b>
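As a quick numerical check of the critical-damping condition (the component values below are illustrative only):
```
import numpy as np

L, C = 10e-3, 100e-6             # illustrative inductance [H] and capacitance [F]
R_crit = 2 * np.sqrt(L / C)      # critical damping requires R = 2*sqrt(L/C)
omega0 = 1 / np.sqrt(L * C)      # natural frequency omega_0 [rad/s]
print(R_crit, omega0)            # 20.0 Ohm and 1000.0 rad/s for these values
```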
```
# Damping selection
dampSelect = w.ToggleButtons(
options=[('Non smorzato', 0), ('Smorzamento critico', 1),],
description='Smorzamento: ', style={'description_width':'15%'})
display(dampSelect)
# Figure definition
fig1, ((f1_ax1), (f1_ax2)) = plt.subplots(2, 1)
fig1.set_size_inches((9.8, 5))
fig1.set_tight_layout(True)
f1_line1, = f1_ax1.plot([], [])
f1_line2, = f1_ax2.plot([], [])
f1_ax1.grid(which='both', axis='both', color='lightgray')
f1_ax2.grid(which='both', axis='both', color='lightgray')
f1_ax1.autoscale(enable=True, axis='both', tight=True)
f1_ax2.autoscale(enable=True, axis='both', tight=True)
f1_ax1.set_title('Diagramma del modulo', fontsize=11)
f1_ax1.set_xscale('log')
f1_ax1.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax1.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=10)
f1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
f1_ax2.set_title('Diagramma della fase', fontsize=11)
f1_ax2.set_xscale('log')
f1_ax2.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax2.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=10)
f1_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)
# System model
def system_model(pole, damp_select):
if damp_select == 1: # Critically damped
W_sys = c.tf([1], [1, 2*pole, pole*pole])
else: # Undamped
W_sys = c.tf([1], [1, 0, pole*pole])
print('Funzione di trasferimento:')
print(W_sys)
# System analysis
poles = c.pole(W_sys) # Poles
print('Poli del sistema:\n')
print(poles)
global f1_line1, f1_line2
f1_ax1.lines.remove(f1_line1)
f1_ax2.lines.remove(f1_line2)
mag, phase, omega = c.bode_plot(W_sys, Plot=False) # Bode-plot
f1_line1, = f1_ax1.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
f1_line2, = f1_ax2.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
f1_ax1.relim()
f1_ax2.relim()
f1_ax1.autoscale_view()
f1_ax2.autoscale_view()
# GUI widgets
def draw_slider(damp_select):
global pole_slider
if damp_select == 1: # Critically damped
pole_slider = w.FloatLogSlider(value=0.1, base=10, min=-4, max=1, description='Polo.Re (negativo):',
continuous_update=False, layout=w.Layout(width='75%'),
style={'description_width':'50%'})
else: # Undamped
pole_slider = w.FloatLogSlider(value=0.01, base=10, min=-4, max=1, description='Polo.Im:', continuous_update=False,
layout=w.Layout(width='75%'), style={'description_width':'50%'})
input_data = w.interactive_output(system_model, {'pole':pole_slider, 'damp_select':dampSelect})
display(w.HBox([pole_slider]), input_data)
w.interactive_output(draw_slider, {'damp_select':dampSelect})
```
After observing the characteristics of the system, <b>select a controller type!</b>
```
#Controller type select
typeSelect = w.ToggleButtons(
options=[('P', 0), ('PI', 1), ('PD', 2), ('PID', 3), ('PID Reale', 4)],
description='Controller: ', style={'description_width':'15%'})
display(typeSelect)
```
<b>Adjust the controller values so that the rise/settling time, the overshoot, or the residual error is minimized!</b><br>
It is not possible to obtain the best result for every metric in a single configuration. Create several solutions, one for each controller type!
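One way to compare configurations quantitatively is to compute the step-response metrics directly; `control.step_info` returns rise time, settling time and overshoot. The sketch below is self-contained and uses illustrative gains (`Kp`, `Td`, `N` are placeholders, not the slider values above):
```
import control as c

A = 1.0                                        # undamped plant 1/(s^2 + A^2)
W_plant = c.tf([1], [1, 0, A*A])
Kp, Td, N = 20.0, 1.0, 10.0                    # illustrative PD gains with a derivative filter
W_pd = c.parallel(c.tf([Kp], [1]),             # proportional term
                  c.tf([Kp*Td, 0], [Td/N, 1])) # filtered derivative term
W_cl = c.feedback(c.series(W_pd, W_plant), 1, -1)
info = c.step_info(W_cl)
print({k: round(info[k], 3) for k in ('RiseTime', 'SettlingTime', 'Overshoot')})
```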
```
# PID control
# Figure definition
fig2, ((f2_ax1, f2_ax2, f2_ax3), (f2_ax4, f2_ax5, f2_ax6)) = plt.subplots(2, 3)
fig2.set_size_inches((9.8, 5))
fig2.set_tight_layout(True)
f2_line1, = f2_ax1.plot([], [])
f2_line2, = f2_ax2.plot([], [])
f2_line3, = f2_ax3.plot([], [])
f2_line4, = f2_ax4.plot([], [])
f2_line5, = f2_ax5.plot([], [])
f2_line6, = f2_ax6.plot([], [])
f2_ax1.grid(which='both', axis='both', color='lightgray')
f2_ax2.grid(which='both', axis='both', color='lightgray')
f2_ax3.grid(which='both', axis='both', color='lightgray')
f2_ax4.grid(which='both', axis='both', color='lightgray')
f2_ax5.grid(which='both', axis='both', color='lightgray')
f2_ax6.grid(which='both', axis='both', color='lightgray')
f2_ax1.autoscale(enable=True, axis='both', tight=True)
f2_ax2.autoscale(enable=True, axis='both', tight=True)
f2_ax3.autoscale(enable=True, axis='both', tight=True)
f2_ax4.autoscale(enable=True, axis='both', tight=True)
f2_ax5.autoscale(enable=True, axis='both', tight=True)
f2_ax6.autoscale(enable=True, axis='both', tight=True)
f2_ax1.set_title('Risposta al gradino in anello chiuso', fontsize=9)
f2_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax1.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax2.set_title('Diagramma di Nyquist', fontsize=9)
f2_ax2.set_xlabel(r'Re', labelpad=0, fontsize=8)
f2_ax2.set_ylabel(r'Im', labelpad=0, fontsize=8)
f2_ax2.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax3.set_title('Diagramma del modulo', fontsize=9)
f2_ax3.set_xscale('log')
f2_ax3.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax3.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=8)
f2_ax3.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax4.set_title('Risposta impulsiva in anello chiuso', fontsize=9)
f2_ax4.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax4.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax4.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax5.set_title('Risposta al gradino in anello aperto', fontsize=9)
f2_ax5.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax5.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax5.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax6.set_title('Diagramma della fase', fontsize=9)
f2_ax6.set_xscale('log')
f2_ax6.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax6.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=8)
f2_ax6.tick_params(axis='both', which='both', pad=0, labelsize=6)
def pid_control(Kp, Ti, Td, Fd, type_select, pole, damp_select):
if damp_select == 1: # Critically damped
W_sys = c.tf([1], [1, 2*pole, pole*pole])
else: # Undamped
W_sys = c.tf([1], [1, 0, pole*pole])
if type_select in (1, 3, 4):
Ti0 = 1
else:
Ti0 = 0
if type_select in (2, 3, 4):
Td0 = 1
else :
Td0 = 0
if type_select == 4:
Fd0 = 1
else:
Fd0 = 0
# PID Controller
P = Kp # Proportional term
I = Kp / Ti # Integral term
D = Kp * Td # Derivative term
Td_f = Td / Fd # Derivative term filter
W_PID = c.parallel(c.tf([P], [1]),
c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1])) # PID controller in time constant format
W_open = c.series(W_PID, W_sys) # Open loop
W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback
# Display
global f2_line1, f2_line2, f2_line3, f2_line4, f2_line5, f2_line6
f2_ax1.lines.remove(f2_line1)
f2_ax2.lines.remove(f2_line2)
f2_ax3.lines.remove(f2_line3)
f2_ax4.lines.remove(f2_line4)
f2_ax5.lines.remove(f2_line5)
f2_ax6.lines.remove(f2_line6)
tout, yout = c.step_response(W_closed)
f2_line1, = f2_ax1.plot(tout, yout, lw=1, color='blue')
_, _, ob = c.nyquist_plot(W_open, Plot=False) # Small resolution plot to determine bounds
real, imag, freq = c.nyquist_plot(W_open, omega=np.logspace(np.log10(ob[0]), np.log10(ob[-1]), 1000), Plot=False)
f2_line2, = f2_ax2.plot(real, imag, lw=1, color='blue')
mag, phase, omega = c.bode_plot(W_open, Plot=False)
f2_line3, = f2_ax3.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
f2_line6, = f2_ax6.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
tout, yout = c.impulse_response(W_closed)
f2_line4, = f2_ax4.plot(tout, yout, lw=1, color='blue')
tout, yout = c.step_response(W_open)
f2_line5, = f2_ax5.plot(tout, yout, lw=1, color='blue')
f2_ax1.relim()
f2_ax2.relim()
f2_ax3.relim()
f2_ax4.relim()
f2_ax5.relim()
f2_ax6.relim()
f2_ax1.autoscale_view()
f2_ax2.autoscale_view()
f2_ax3.autoscale_view()
f2_ax4.autoscale_view()
f2_ax5.autoscale_view()
f2_ax6.autoscale_view()
# GUI widgets
def draw_controllers(type_select):
global Kp_slider
global Ti_slider
global Td_slider
global Fd_slider
Kp_slider = w.FloatLogSlider(value=20, base=10, min=-1, max=4, description='Kp:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
if type_select in (1, 3, 4):
Ti_slider = w.FloatLogSlider(value=0.0035, base=10, min=-4, max=1, description='Ti:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
else:
Ti_slider = w.FloatLogSlider(value=0.0035, base=10, min=-4, max=1, description='Ti:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'), disabled=True)
if type_select in (2, 3, 4):
Td_slider = w.FloatLogSlider(value=1, base=10, min=-4, max=1, description='Td:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
else:
Td_slider = w.FloatLogSlider(value=1, base=10, min=-4, max=1, description='Td:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'), disabled=True)
if type_select == 4:
Fd_slider = w.FloatLogSlider(value=1, base=10, min=0, max=3, description='Fd:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
else:
Fd_slider = w.FloatLogSlider(value=1, base=10, min=0, max=3, description='Fd:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'), disabled=True)
input_data = w.interactive_output(pid_control, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,
'Fd': Fd_slider, 'type_select':typeSelect, 'pole':pole_slider, 'damp_select':dampSelect})
display(w.HBox([Kp_slider, Ti_slider, Td_slider, Fd_slider]), input_data)
w.interactive_output(draw_controllers, {'type_select':typeSelect})
```
The reference-tracking capability of the controlled system can be tested using the simulation below.<br>
<b>Adjust the controller so that it can track a sine wave acceptably!</b>
<br><br>
<i>(The animations are rescaled to fit the figure over the entire simulation. Because of this, unstable solutions may appear not to move until the very last moment.)</i>
```
# Simulation data
anim_fig = plt.figure()
anim_fig.set_size_inches((9.8, 4))
anim_fig.set_tight_layout(True)
anim_ax1 = anim_fig.add_subplot(111)
frame_count=1000
scope_rounds=4
l1 = anim_ax1.plot([], [], lw=1, color='blue')
l2 = anim_ax1.plot([], [], lw=2, color='red')
line1 = l1[0]
line2 = l2[0]
anim_ax1.legend(l1+l2, ['Riferimento', 'Uscita'], loc=1)
anim_ax1.set_title('Simulazione', fontsize=12)
anim_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=10)
anim_ax1.set_ylabel(r'$y\/$[/]', labelpad=0, fontsize=10)
anim_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
anim_ax1.grid(which='both', axis='both', color='lightgray')
T_plot = []
X_plot = []
R_plot = []
#Simulation function
def simulation(Kp, Ti, Td, Fd, type_select, pole, damp_select, T, dt, X, Xf, Xa):
if damp_select == 1: # Critically damped
W_sys = c.tf([1], [1, 2*pole, pole*pole])
else: # Undamped
W_sys = c.tf([1], [1, 0, pole*pole])
if type_select in (1, 3, 4):
Ti0 = 1
else:
Ti0 = 0
if type_select in (2, 3, 4):
Td0 = 1
else :
Td0 = 0
if type_select == 4:
Fd0 = 1
else:
Fd0 = 0
# Controller
P = Kp # Proportional term
I = Kp / Ti # Integral term
D = Kp * Td # Derivative term
    Td_f = Td / Fd                            # Derivative term filter (kept consistent with pid_control above)
W_PID = c.parallel(c.tf([P], [1]),
c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1])) # PID controller
# Model
W_open = c.series(W_PID, W_sys) # Open loop
W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback
# Reference and disturbance signals
T_sim = np.arange(0, T, dt, dtype=np.float64)
if X == 0: # Sine wave reference
X_sim = np.sin(2 * np.pi * Xf * T_sim) * Xa
elif X == 1: # Square wave reference
X_sim = np.sign(np.sin(2 * np.pi * Xf * T_sim)) * Xa
# System response
Tx, youtx, xoutx = c.forced_response(W_closed, T_sim, X_sim)
R_sim = youtx
# Display
XR_max = max(np.amax(np.absolute(np.concatenate((X_sim, R_sim)))), Xa)
anim_ax1.set_ylim((-1.2 * XR_max, 1.2 * XR_max))
global T_plot, X_plot, R_plot
T_plot = np.linspace(0, T, frame_count*(scope_rounds+1), dtype=np.float32)
X_plot = np.interp(T_plot, T_sim, X_sim)
R_plot = np.interp(T_plot, T_sim, R_sim)
def anim_init():
line1.set_data([], [])
line2.set_data([], [])
anim_ax1.set_xlim((0, T_plot[frame_count-1]))
return (line1, line2, anim_ax1,)
def animate(i):
line1.set_data(T_plot[scope_rounds*i:scope_rounds*i+frame_count-1], X_plot[scope_rounds*i:scope_rounds*i+frame_count-1])
line2.set_data(T_plot[scope_rounds*i:scope_rounds*i+frame_count-1], R_plot[scope_rounds*i:scope_rounds*i+frame_count-1])
anim_ax1.set_xlim((T_plot[i*scope_rounds], T_plot[i*scope_rounds+frame_count-1]))
return (line1, line2, anim_ax1,)
anim = animation.FuncAnimation(anim_fig, animate, init_func=anim_init,
frames=frame_count, interval=10, blit=True,
repeat=True)
# Controllers
T_slider = w.FloatLogSlider(value=10, base=10, min=-0.7, max=1, step=0.01,
description='Durata [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
dt_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=-1, step=0.01,
description='Campionamento [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
X_type = w.Dropdown(options=[('Sinusoide', 0), ('Onda quadra', 1)], value=1,
description='Riferimento: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto'))
Xf_slider = w.FloatLogSlider(value=0.5, base=10, min=-2, max=2, step=0.01,
description='Frequenza [Hz]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Xa_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,
description='Ampiezza [/]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
input_data = w.interactive_output(simulation, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,'Fd': Fd_slider,
'type_select': typeSelect, 'pole':pole_slider, 'damp_select':dampSelect,
'T': T_slider, 'dt': dt_slider,
'X': X_type, 'Xf': Xf_slider, 'Xa': Xa_slider})
display(w.HBox([w.HBox([T_slider, dt_slider], layout=w.Layout(width='25%')),
w.Box([], layout=w.Layout(width='5%')),
w.VBox([X_type, w.HBox([Xf_slider, Xa_slider])], layout=w.Layout(width='30%')),
w.Box([], layout=w.Layout(width='5%'))],
layout=w.Layout(width='100%', justify_content='center')), input_data)
```
The duration parameter controls the simulated time span and does not affect the animation's run time. The model's sampling step, on the other hand, can refine the results at the cost of more computing resources.
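As a rough guide (illustrative numbers only), the simulation cost scales with the number of samples `T/dt`, while the reference frequency sets how small `dt` must be to resolve each period:
```
T, dt, f_ref = 10.0, 0.01, 0.5            # illustrative duration [s], time step [s], reference frequency [Hz]
n_samples = int(T / dt)                   # number of points passed to forced_response
points_per_period = 1.0 / (f_ref * dt)    # samples per reference period
print(n_samples, points_per_period)       # 1000 samples, 200.0 points per period here
```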
# Astrophysical $\nu$ Targets
Explore DESI target information around through-going track alerts from IceCube.
Note that we prioritize BGS targets within the 90% C.I. of the angular reconstruction reported to GCN/AMON; additional targets from the Legacy Survey imaging are not used.
```
# Load urllib and Beautiful Soup to grab the public GCN alert list.
from urllib.request import urlopen
from bs4 import BeautifulSoup
from astropy.table import Table, unique, vstack
from astropy.time import Time
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from desitarget.io import read_targets_in_hp, read_targets_in_box, read_targets_in_cap, check_hp_target_dir
from desitarget.targetmask import desi_mask, bgs_mask
import healpy as hp
mpl.rc('font', size=14)
```
## Public Alerts from GCN-AMON
```
def get_amon_icecube_astrotrack_alerts(url=None):
"""Get IceCube alerts from the GCN AMON channel (IceCube astrotrack).
This corresponds to single neutrino tracks in the detector between
roughly 100 TeV and 10 PeV, and includes events from the EHE and HESE
channels.
Parameters
----------
url : str
URL of GCN alert webpage.
Returns
-------
tab : astropy.table.Table
Table of alert data.
"""
if url is None:
url = 'https://gcn.gsfc.nasa.gov/amon_icecube_gold_bronze_events.html'
# Parse the GCN/AMON notice page.
    html = urlopen(url)
    soup = BeautifulSoup(html.read(), 'html.parser')
events = soup.table.find_all('tr')
# Accumulate event data.
evtid = []
run = []
evt = []
rev = []
dtm = []
noticetype = []
ra = []
dec = []
err90 = []
err50 = []
energy = []
signalness = []
far = []
comments = []
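    # Skip the first two <tr> rows (table header rows) and parse one event per remaining row.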
for event in events[2:]:
data = event.find_all('td')
_id, _rev, _date, _time, _type, _ra, _dec, _err90, _err50, _energy, _sig, _far, _cmt = [_.string.strip() for _ in data]
_run, _evt = [int(_) for _ in _id.split('_')]
evtid.append(_id)
run.append(_run)
evt.append(_evt)
rev.append(int(_rev))
_dtm = '20{}-{}-{}T{}0'.format(*_date.split('/'), _time)
dtm.append(_dtm)
noticetype.append(_type)
ra.append(float(_ra))
dec.append(float(_dec))
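        # Error radii in the notice table are given in arcminutes; convert to degrees.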
err50.append(float(_err50) / 60.)
err90.append(float(_err90) / 60.)
energy.append(float(_energy))
signalness.append(float(_sig))
far.append(float(_far))
comments.append(_cmt)
# Push data into an astropy table for later access.
tab = Table()
tab['EVENTID'] = evtid
tab['RUNID'] = run
tab['EVENT'] = evt
tab['REVISION'] = rev
tab['TIME'] = dtm #Time(dtm, format='isot')
tab['NOTICETYPE'] = noticetype
tab['RA'] = ra
tab['RA'].unit = 'degree'
tab['DEC'] = dec
tab['DEC'].unit = 'degree'
tab['ERR50'] = err50
tab['ERR50'].unit = 'degree'
tab['ERR90'] = err90
tab['ERR90'].unit = 'degree'
tab['ENERGY'] = energy
tab['ENERGY'].unit = 'TeV'
tab['SIGNALNESS'] = signalness
tab['FAR'] = far
tab['COMMENTS'] = comments
return tab
tab = get_amon_icecube_astrotrack_alerts()
tab
```
## Latest Revisions
Select out the latest reconstructions/revisions of each event.
```
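# Keep one row per EVENTID; keep='first' assumes the newest revision of each event appears first in the GCN table.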
revtab = unique(tab, keys='EVENTID', keep='first')
```
### Event Properties
Separate the events into Bronze and Gold samples and plot properties (angular resolution, FAR, signalness).
```
ntypes = np.unique(revtab['NOTICETYPE'])
bronze = revtab['NOTICETYPE'] == 'BRONZE'
gold = revtab['NOTICETYPE'] == 'GOLD'
fig, axes = plt.subplots(2,2, figsize=(9,8), tight_layout=True)
sigbr = revtab['SIGNALNESS'][bronze]
siggd = revtab['SIGNALNESS'][gold]
farbr = revtab['FAR'][bronze]
fargd = revtab['FAR'][gold]
err50br = revtab['ERR50'][bronze]
err50gd = revtab['ERR50'][gold]
err90br = revtab['ERR90'][bronze]
err90gd = revtab['ERR90'][gold]
ax = axes[0,0]
sigbins = np.linspace(0,1,21)
ax.hist([siggd, sigbr],
label=['Gold', 'Bronze'],
color=['orange', 'blue'],
bins=sigbins,
align='mid', stacked=True)
ax.legend(fontsize=12)
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='count');
ax = axes[0,1]
ax.scatter(sigbr, farbr, label='Bronze', color='blue')
ax.scatter(siggd, fargd, label='Gold', color='orange')
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='FAR [yr$^{-1}$]')
ax.grid(ls=':')
ax = axes[1,0]
ax.scatter(sigbr, err50br, label='Bronze', color='blue')
ax.scatter(siggd, err50gd, label='Gold', color='orange')
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='50% angular error [deg]',
ylim=(0,12))
ax.grid(ls=':')
ax = axes[1,1]
ax.scatter(sigbr, err90br, label='Bronze', color='blue')
ax.scatter(siggd, err90gd, label='Gold', color='orange')
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='90% angular error [deg]',
ylim=(0,12))
ax.grid(ls=':');
```
## Event Angular Uncertainties
Plot of events on sky with the DESI footprint.
```
desi_footprint = hp.read_map('desi_mask_nside0064.fits')
desi_footprint[desi_footprint == 0] = hp.UNSEEN
desi_footprint[desi_footprint == 1] = 0
rot=120
hp.mollview(desi_footprint, fig=1, cbar=False, unit=r'probability', title='',
min=0, max=10, flip='astro', rot=rot, cmap='gray_r')
hp.graticule(ls=':', alpha=0.5, dpar=30, dmer=45)
# Draw gold events with 90% containment regions (statistical only).
ra_gold = revtab['RA'][gold]
dec_gold = revtab['DEC'][gold]
err90_gold = revtab['ERR90'][gold]
for _ra, _dec, _err90 in zip(ra_gold, dec_gold, err90_gold):
cont_ra, cont_dec = [], []
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(_ra + _err90*np.cos(phi))
cont_dec.append(_dec + _err90*np.sin(phi))
hp.projplot(cont_ra, cont_dec, lonlat=True, lw=1, color='r', label='Gold')
# Draw bronze events with 90% containment regions (statistical only).
ra_brnz = revtab['RA'][bronze]
dec_brnz = revtab['DEC'][bronze]
err90_brnz = revtab['ERR90'][bronze]
for _ra, _dec, _err90 in zip(ra_brnz, dec_brnz, err90_brnz):
cont_ra, cont_dec = [], []
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(_ra + _err90*np.cos(phi))
cont_dec.append(_dec + _err90*np.sin(phi))
hp.projplot(cont_ra, cont_dec, lonlat=True, lw=1, color='b', label='Bronze')
# Label latitude lines.
for _dec in [-60,-30,0,30,60]:
vert = 'top' if _dec < 0 else 'bottom' if _dec > 0 else 'center'
hp.projtext(180+rot, _dec, r'{0:+}$^\circ$'.format(_dec),
ha='left', va=vert, fontsize=12, lonlat=True)
# Label longitude lines.
for _ra in np.arange(0,360,45):
hp.projtext(_ra, -40, r'{:d}$^\circ$'.format(_ra),
horizontalalignment='center', fontsize=12, lonlat=True)
fig = plt.gcf()
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.9)
fig.savefig('astrotrack.png', dpi=120, transparent=True)
```
### Gold Events with Highest Signalness and Lowest Angular Uncertainty
```
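# Ad hoc selection cuts: signalness above 0.6 and 90% angular error below 3 degrees.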
select = (revtab['SIGNALNESS'] > 0.6) & (revtab['ERR90'] < 3)
revtab[select]
```
### Plot Gold Events + SV Fields
```
def draw_sv(ra_min, ra_max, dec_min, dec_max, color='k'):
"""Given the bounding box of an SV field, plot it using current HEALPix projection.
Parameters
----------
ra_min: float
Minimum RA of the SV bounding box, in degrees.
ra_max: float
Maximum RA of the SV bounding box, in degrees.
dec_min: float
Minimum Dec of the SV bounding box, in degrees.
dec_max: float
Maximum Dec of the SV bounding box, in degrees.
"""
y = np.arange(dec_min, dec_max+0.1, 0.1)
x = np.full_like(y, ra_min)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
x = np.full_like(y, ra_max)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
x = np.arange(ra_min, ra_max+0.1, 0.1)
y = np.full_like(x, dec_max)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
y = np.full_like(x, dec_min)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
rot=120
hp.mollview(desi_footprint, cbar=False, unit=r'probability', title='',
min=0, max=10, flip='astro', rot=rot, cmap='gray_r')
hp.graticule(ls=':', alpha=0.5, dpar=30, dmer=45)
# Draw gold events with 90% containment regions (statistical only).
ra_gold = revtab['RA'][select]
dec_gold = revtab['DEC'][select]
err90_gold = revtab['ERR90'][select]
for _ra, _dec, _err90 in zip(ra_gold, dec_gold, err90_gold):
cont_ra, cont_dec = [], []
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(_ra + _err90*np.cos(phi))
cont_dec.append(_dec + _err90*np.sin(phi))
hp.projplot(cont_ra, cont_dec, lonlat=True, lw=1, color='r', label='Gold')
# Label latitude lines.
for _dec in [-60,-30,0,30,60]:
vert = 'top' if _dec < 0 else 'bottom' if _dec > 0 else 'center'
hp.projtext(180+rot, _dec, r'{0:+}$^\circ$'.format(_dec),
ha='left', va=vert, fontsize=12, lonlat=True)
# Label longitude lines.
for _ra in np.arange(0,360,45):
hp.projtext(_ra, -40, r'{:d}$^\circ$'.format(_ra),
horizontalalignment='center', fontsize=12, lonlat=True)
show_sv = True
if show_sv:
draw_sv(187,191,61,63) # NGC, 187 < α < 191, 61 < δ < 63 4 BASS+MzLS HDF-N
draw_sv(210,220,50,55) # NGC, 210 < α < 220, 50 < δ < 55 31 BASS+MzLS DEEP2/EGS, SNLS-D3 and HSC photo-z
draw_sv(215,230,41,46) # NGC, 215 < α < 230, 41 < δ < 46 56 BASS+MzLS HSC photo-z
draw_sv(260,280,60,70) # NGC, 260 < α < 280, 60 < δ < 70 92 BASS+MzLS North Ecliptic Pole, a Euclid deep field
draw_sv(129,141,-2,3) # NGC, 129 < α < 141, −2 < δ < 3 60 DECaLS GAMA G09
draw_sv(149,151,1.2,3.2) # NGC, 149 < α < 151, 1.2 < δ < 3.2 4 DECaLS COSMOS
draw_sv(174,186,-3,2) # NGC, 174 < α < 186, −3 < δ < 2 60 DECaLS GAMA G12
draw_sv(211,224,-2,3) # NGC, 211 < α < 224, −2 < δ < 3 65 DECaLS GAMA G15
draw_sv(-5,5,15,20) # SGC, −5 < α < 5, 15 < δ < 20 24 DECaLS
draw_sv(30,40,-7,2) # SGC, 30 < α < 40, −7 < δ < 2 65 DES Stripe 82/HSC, GAMA 02, DEEP2, XMM-LSS
draw_sv(330,340,-2,3) # SGC, 330 < α < 340, −2 < δ < 3 50 DES Stripe 82 , and HSC photo-z
# Overlapping Imaging
draw_sv(135,160,30,35) # NGC, 135 < α < 160, 30 < δ < 35 104 BASS+MzLS and DECaLS
draw_sv(35.3,43,-2.3,2.7) # SGC, 35.3 < α < 43.0, −2.3 < δ < 2.7 39 BASS+MzLS and DES Stripe 82, and HSC photo-z
# Bad Seeing Conditions
draw_sv(215,220,30,40) # NGC, 215 < α < 220, 30 < δ < 40 40 BASS+MzLS AGES
# Strong Dust Extinction
draw_sv(140,150,65,70) # NGC, 140 < α < 150, 65 < δ < 70 19 BASS+MzLS
draw_sv(240,245,20,25) # NGC, 240 < α < 245, 20 < δ < 25 23 DECaLS
draw_sv(345,350,20,25) # SGC, 345 < α < 350, 20 < δ < 25 23 DECaLS
# High Stellar Density
draw_sv(200,210,5,10) # NGC, 200 < α < 210, 5 < δ < 10 49 DECaLS Sagittarius Stream
# High Stellar Density and Strong Dust Extinction
draw_sv(273,283,40,45) # NGC, 273 < α < 283, 40 < δ < 45 37 BASS+MzLS Close to Galactic Plane
draw_sv(260,270,15,20) # NGC, 260 < α < 270, 15 < δ < 20 47 DECaLS Close to Galactic Plane
draw_sv(332,340,25,30) # SGC, 332 < α < 340, 25 < δ < 30 36 DECaLS Close to Galactic Plane
```
## Load DR9 Targets in 90% CI of Alert `IC134191_17593623`
```
nside = 1024
event = revtab[revtab['EVENTID']=='134191_17593623']
run_c = event['RUNID'][0]
evt_c = event['EVENT'][0]
ra_c = event['RA'][0]
dec_c = event['DEC'][0]
date_c = event['TIME'][0]
vec_c = hp.ang2vec(ra_c, dec_c, lonlat=True)
err50_c = event['ERR50'].to('radian').value[0]
err90_c = event['ERR90'].to('radian').value[0]
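# All nside=1024 HEALPix pixels (nested ordering) within the 90% containment radius of the alert direction.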
pix90 = hp.query_disc(nside, vec=vec_c, radius=err90_c, nest=True)
# Load sample of resolved DR9 target catalog using HEALPixels in the 90% C.I.
hpdirnames = ['/global/project/projectdirs/desi/target/catalogs/dr9m/0.44.0/targets/main/resolve/bright',
'/global/project/projectdirs/desi/target/catalogs/dr9m/0.44.0/targets/main/resolve/dark']
readcols = ['TARGETID', 'BRICKID', 'BRICKNAME', 'BRICK_OBJID',
'RA', 'DEC', 'PMRA', 'PMDEC', 'REF_EPOCH',
'DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET',
'FLUX_G', 'FLUX_R', 'FLUX_Z',
'FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z']
targlist90 = None
for hpdirname in hpdirnames:
if targlist90 is None:
targlist90 = Table(read_targets_in_hp(hpdirname, nside=nside, pixlist=pix90, columns=readcols))
else:
        targlist90 = vstack([targlist90, Table(read_targets_in_hp(hpdirname, nside=nside, pixlist=pix90, columns=readcols))])
targlist90 = unique(targlist90)
ra_c, dec_c, err50_c, err90_c
targlist90
ra90, dec90 = [targlist90[_] for _ in ['RA', 'DEC']]
```
### Plot Selected Targets within 90% C.I.
```
xmin = np.round(ra_c - 2.5, decimals=1)
xmax = np.round(ra_c + 2.5, decimals=1)
# xmin = angle_to_180(np.round(ra_c - 5))
# xmax = angle_to_180(np.round(ra_c + 5))
if xmax < xmin:
xmin, xmax = xmax, xmin
cxmin, cxmax = xmin, xmax
frot = 0.
if xmax > 90 and xmin < -90:
    frot, cxmin, cxmax = 180., xmax-180., xmax+180.
ymin = np.round(dec_c - 2.5, decimals=1)
ymax = np.round(dec_c + 2.5, decimals=1)
faspect = np.abs(cxmax - cxmin)/np.abs(ymax-ymin)
fysize = 4
figsize = (fysize*faspect+2, fysize+2.75)
figsize
fig, ax = plt.subplots(1,1, num=1, figsize=figsize)
for _radius, _lstyle, _clev in zip([err50_c, err90_c], ['--', '-'], ['50', '90']):
cont_ra, cont_dec = [], []
_r = np.degrees(_radius)
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(ra_c + _r*np.cos(phi))
cont_dec.append(dec_c + _r*np.sin(phi))
ax.plot(cont_ra, cont_dec, 'g', ls=_lstyle, label='{}% CI'.format(_clev))
ratarg, dectarg = ra90, dec90
ratarg[ratarg < 0] += 360
ax.plot(ratarg, dectarg, 'k,', alpha=0.3)
ax.set(xlim=(cxmax, cxmin),
xlabel='RA [deg]',
ylabel='Dec [deg]',
ylim=(ymin, ymax),
title='{} {}'.format(run_c, evt_c),
aspect='equal')
ax.grid(ls=':')
circ = plt.Circle((ra_c, dec_c), radius=1.6, fc='None', ec='k', ls='--')
ax.add_artist(circ)
_h, _l = ax.get_legend_handles_labels()
_h.append(circ)
_l.append('DESI FOV')
ax.legend(handles=_h, labels=_l, fontsize=12, ncol=3)
# # cb = fig.colorbar(img, orientation='horizontal', shrink=0.85, fraction=0.1,
# # pad=0.1, ax=ax)
# # cb.set_label(r'GW angular posterior Pr$(\alpha,\delta)$')
fig.savefig('targlist.png')
```
### Select BGS Bright Targets in the FOV
```
select_targets = (targlist90['BGS_TARGET'] & bgs_mask.mask('BGS_BRIGHT')) != 0
ra90, dec90 = [targlist90[_][select_targets] for _ in ['RA', 'DEC']]
len(ra90)
fig, ax = plt.subplots(1,1, num=1, figsize=figsize)
fig.patch.set_alpha(0)
for _radius, _lstyle, _clev in zip([err50_c, err90_c], ['--', '-'], ['50', '90']):
cont_ra, cont_dec = [], []
_r = np.degrees(_radius)
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(ra_c + _r*np.cos(phi))
cont_dec.append(dec_c + _r*np.sin(phi))
ax.plot(cont_ra, cont_dec, 'g', ls=_lstyle, label='{}% CI'.format(_clev), lw=2)
ratarg, dectarg = ra90, dec90
ratarg[ratarg < 0] += 360
ax.plot(ratarg, dectarg, 'k.', alpha=0.05)
ax.set(xlim=(145.5, 140.5),
xlabel='RA [deg]',
ylabel='Dec [deg]',
ylim=(1, 6),
title='Run {}, Event {}\n{}'.format(run_c, evt_c, date_c),
aspect='equal')
ax.grid(ls=':')
circ = plt.Circle((ra_c, dec_c), radius=1.6, fc='None', ec='b', ls='--', lw=2)
ax.add_artist(circ)
_h, _l = ax.get_legend_handles_labels()
_h.append(circ)
_l.append('DESI FOV')
ax.legend(handles=_h, labels=_l, fontsize=12, ncol=3)
# # cb = fig.colorbar(img, orientation='horizontal', shrink=0.85, fraction=0.1,
# # pad=0.1, ax=ax)
# # cb.set_label(r'GW angular posterior Pr$(\alpha,\delta)$')
fig.savefig('targlist_bgs.png', dpi=100)
```
## Write Secondary Target List
Dump output list to ASCII and FITS files.
```
output = targlist90[select_targets]['RA', 'DEC', 'PMRA', 'PMDEC', 'REF_EPOCH']
output['OVERRIDE'] = np.full_like(output['RA'], False, dtype=bool)
output.write('nu_IC134191-17593623_dr9m_sec_targets.txt', format='ascii', overwrite=True)
output.write('nu_IC134191-17593623_dr9m_sec_targets.fits', format='fits', overwrite=True)
```
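As a quick sanity check, the FITS output written above can be read back to confirm the row count and column names:
```
check = Table.read('nu_IC134191-17593623_dr9m_sec_targets.fits')
print(len(check), check.colnames)   # expect RA, DEC, PMRA, PMDEC, REF_EPOCH, OVERRIDE
```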
# Load urllib and Beautiful Soup to grab the public GCN alert list.
from urllib.request import urlopen
from bs4 import BeautifulSoup
from astropy.table import Table, unique, vstack
from astropy.time import Time
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from desitarget.io import read_targets_in_hp, read_targets_in_box, read_targets_in_cap, check_hp_target_dir
from desitarget.targetmask import desi_mask, bgs_mask
import healpy as hp
mpl.rc('font', size=14)
def get_amon_icecube_astrotrack_alerts(url=None):
"""Get IceCube alerts from the GCN AMON channel (IceCube astrotrack).
This corresponds to single neutrino tracks in the detector between
roughly 100 TeV and 10 PeV, and includes events from the EHE and HESE
channels.
Parameters
----------
url : str
URL of GCN alert webpage.
Returns
-------
tab : astropy.table.Table
Table of alert data.
"""
if url is None:
url = 'https://gcn.gsfc.nasa.gov/amon_icecube_gold_bronze_events.html'
# Parse the GCN/AMON notice page.
html = urlopen('https://gcn.gsfc.nasa.gov/amon_icecube_gold_bronze_events.html')
soup = BeautifulSoup(html.read())
events = soup.table.find_all('tr')
# Accumulate event data.
evtid = []
run = []
evt = []
rev = []
dtm = []
noticetype = []
ra = []
dec = []
err90 = []
err50 = []
energy = []
signalness = []
far = []
comments = []
for event in events[2:]:
data = event.find_all('td')
_id, _rev, _date, _time, _type, _ra, _dec, _err90, _err50, _energy, _sig, _far, _cmt = [_.string.strip() for _ in data]
_run, _evt = [int(_) for _ in _id.split('_')]
evtid.append(_id)
run.append(_run)
evt.append(_evt)
rev.append(int(_rev))
_dtm = '20{}-{}-{}T{}0'.format(*_date.split('/'), _time)
dtm.append(_dtm)
noticetype.append(_type)
ra.append(float(_ra))
dec.append(float(_dec))
err50.append(float(_err50) / 60.)
err90.append(float(_err90) / 60.)
energy.append(float(_energy))
signalness.append(float(_sig))
far.append(float(_far))
comments.append(_cmt)
# Push data into an astropy table for later access.
tab = Table()
tab['EVENTID'] = evtid
tab['RUNID'] = run
tab['EVENT'] = evt
tab['REVISION'] = rev
tab['TIME'] = dtm #Time(dtm, format='isot')
tab['NOTICETYPE'] = noticetype
tab['RA'] = ra
tab['RA'].unit = 'degree'
tab['DEC'] = dec
tab['DEC'].unit = 'degree'
tab['ERR50'] = err50
tab['ERR50'].unit = 'degree'
tab['ERR90'] = err90
tab['ERR90'].unit = 'degree'
tab['ENERGY'] = energy
tab['ENERGY'].unit = 'TeV'
tab['SIGNALNESS'] = signalness
tab['FAR'] = far
tab['COMMENTS'] = comments
return tab
tab = get_amon_icecube_astrotrack_alerts()
tab
revtab = unique(tab, keys='EVENTID', keep='first')
ntypes = np.unique(revtab['NOTICETYPE'])
bronze = revtab['NOTICETYPE'] == 'BRONZE'
gold = revtab['NOTICETYPE'] == 'GOLD'
fig, axes = plt.subplots(2,2, figsize=(9,8), tight_layout=True)
sigbr = revtab['SIGNALNESS'][bronze]
siggd = revtab['SIGNALNESS'][gold]
farbr = revtab['FAR'][bronze]
fargd = revtab['FAR'][gold]
err50br = revtab['ERR50'][bronze]
err50gd = revtab['ERR50'][gold]
err90br = revtab['ERR90'][bronze]
err90gd = revtab['ERR90'][gold]
ax = axes[0,0]
sigbins = np.linspace(0,1,21)
ax.hist([siggd, sigbr],
label=['Gold', 'Bronze'],
color=['orange', 'blue'],
bins=sigbins,
align='mid', stacked=True)
ax.legend(fontsize=12)
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='count');
ax = axes[0,1]
ax.scatter(sigbr, farbr, label='Bronze', color='blue')
ax.scatter(siggd, fargd, label='Gold', color='orange')
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='FAR [yr$^{-1}$]')
ax.grid(ls=':')
ax = axes[1,0]
ax.scatter(sigbr, err50br, label='Bronze', color='blue')
ax.scatter(siggd, err50gd, label='Gold', color='orange')
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='50% angular error [deg]',
ylim=(0,12))
ax.grid(ls=':')
ax = axes[1,1]
ax.scatter(sigbr, err90br, label='Bronze', color='blue')
ax.scatter(siggd, err90gd, label='Gold', color='orange')
ax.set(xlabel='signalness',
xlim=(0,1),
ylabel='90% angular error [deg]',
ylim=(0,12))
ax.grid(ls=':');
desi_footprint = hp.read_map('desi_mask_nside0064.fits')
desi_footprint[desi_footprint == 0] = hp.UNSEEN
desi_footprint[desi_footprint == 1] = 0
rot=120
hp.mollview(desi_footprint, fig=1, cbar=False, unit=r'probability', title='',
min=0, max=10, flip='astro', rot=rot, cmap='gray_r')
hp.graticule(ls=':', alpha=0.5, dpar=30, dmer=45)
# Draw gold events with 90% containment regions (statistical only).
ra_gold = revtab['RA'][gold]
dec_gold = revtab['DEC'][gold]
err90_gold = revtab['ERR90'][gold]
for _ra, _dec, _err90 in zip(ra_gold, dec_gold, err90_gold):
cont_ra, cont_dec = [], []
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(_ra + _err90*np.cos(phi))
cont_dec.append(_dec + _err90*np.sin(phi))
hp.projplot(cont_ra, cont_dec, lonlat=True, lw=1, color='r', label='Gold')
# Draw bronze events with 90% containment regions (statistical only).
ra_brnz = revtab['RA'][bronze]
dec_brnz = revtab['DEC'][bronze]
err90_brnz = revtab['ERR90'][gold]
for _ra, _dec, _err90 in zip(ra_brnz, dec_brnz, err90_brnz):
cont_ra, cont_dec = [], []
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(_ra + _err90*np.cos(phi))
cont_dec.append(_dec + _err90*np.sin(phi))
hp.projplot(cont_ra, cont_dec, lonlat=True, lw=1, color='b', label='Bronze')
# Label latitude lines.
for _dec in [-60,-30,0,30,60]:
vert = 'top' if _dec < 0 else 'bottom' if _dec > 0 else 'center'
hp.projtext(180+rot, _dec, r'{0:+}$^\circ$'.format(_dec),
ha='left', va=vert, fontsize=12, lonlat=True)
# Label longitude lines.
for _ra in np.arange(0,360,45):
hp.projtext(_ra, -40, r'{:d}$^\circ$'.format(_ra),
horizontalalignment='center', fontsize=12, lonlat=True)
fig = plt.gcf()
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.9)
fig.savefig('astrotrack.png', dpi=120, transparent=True)
select = (revtab['SIGNALNESS'] > 0.6) & (revtab['ERR90'] < 3)
revtab[select]
def draw_sv(ra_min, ra_max, dec_min, dec_max, color='k'):
"""Given the bounding box of an SV field, plot it using current HEALPix projection.
Parameters
----------
ra_min: float
Minimum RA of the SV bounding box, in degrees.
ra_max: float
Maximum RA of the SV bounding box, in degrees.
dec_min: float
Minimum Dec of the SV bounding box, in degrees.
dec_max: float
Maximum Dec of the SV bounding box, in degrees.
"""
y = np.arange(dec_min, dec_max+0.1, 0.1)
x = np.full_like(y, ra_min)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
x = np.full_like(y, ra_max)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
x = np.arange(ra_min, ra_max+0.1, 0.1)
y = np.full_like(x, dec_max)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
y = np.full_like(x, dec_min)
hp.projplot(x, y, lonlat=True, color=color, alpha=0.7, lw=1, ms=0)
rot=120
hp.mollview(desi_footprint, cbar=False, unit=r'probability', title='',
min=0, max=10, flip='astro', rot=rot, cmap='gray_r')
hp.graticule(ls=':', alpha=0.5, dpar=30, dmer=45)
# Draw gold events with 90% containment regions (statistical only).
ra_gold = revtab['RA'][select]
dec_gold = revtab['DEC'][select]
err90_gold = revtab['ERR90'][select]
for _ra, _dec, _err90 in zip(ra_gold, dec_gold, err90_gold):
cont_ra, cont_dec = [], []
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(_ra + _err90*np.cos(phi))
cont_dec.append(_dec + _err90*np.sin(phi))
hp.projplot(cont_ra, cont_dec, lonlat=True, lw=1, color='r', label='Gold')
# Label latitude lines.
for _dec in [-60,-30,0,30,60]:
vert = 'top' if _dec < 0 else 'bottom' if _dec > 0 else 'center'
hp.projtext(180+rot, _dec, r'{0:+}$^\circ$'.format(_dec),
ha='left', va=vert, fontsize=12, lonlat=True)
# Label longitude lines.
for _ra in np.arange(0,360,45):
hp.projtext(_ra, -40, r'{:d}$^\circ$'.format(_ra),
horizontalalignment='center', fontsize=12, lonlat=True)
show_sv = True
if show_sv:
draw_sv(187,191,61,63) # NGC, 187 < α < 191, 61 < δ < 63 4 BASS+MzLS HDF-N
draw_sv(210,220,50,55) # NGC, 210 < α < 220, 50 < δ < 55 31 BASS+MzLS DEEP2/EGS, SNLS-D3 and HSC photo-z
draw_sv(215,230,41,46) # NGC, 215 < α < 230, 41 < δ < 46 56 BASS+MzLS HSC photo-z
draw_sv(260,280,60,70) # NGC, 260 < α < 280, 60 < δ < 70 92 BASS+MzLS North Ecliptic Pole, a Euclid deep field
draw_sv(129,141,-2,3) # NGC, 129 < α < 141, −2 < δ < 3 60 DECaLS GAMA G09
draw_sv(149,151,1.2,3.2) # NGC, 149 < α < 151, 1.2 < δ < 3.2 4 DECaLS COSMOS
draw_sv(174,186,-3,2) # NGC, 174 < α < 186, −3 < δ < 2 60 DECaLS GAMA G12
draw_sv(211,224,-2,3) # NGC, 211 < α < 224, −2 < δ < 3 65 DECaLS GAMA G15
draw_sv(-5,5,15,20) # SGC, −5 < α < 5, 15 < δ < 20 24 DECaLS
draw_sv(30,40,-7,2) # SGC, 30 < α < 40, −7 < δ < 2 65 DES Stripe 82/HSC, GAMA 02, DEEP2, XMM-LSS
draw_sv(330,340,-2,3) # SGC, 330 < α < 340, −2 < δ < 3 50 DES Stripe 82 , and HSC photo-z
# Overlapping Imaging
draw_sv(135,160,30,35) # NGC, 135 < α < 160, 30 < δ < 35 104 BASS+MzLS and DECaLS
draw_sv(35.3,43,-2.3,2.7) # SGC, 35.3 < α < 43.0, −2.3 < δ < 2.7 39 BASS+MzLS and DES Stripe 82, and HSC photo-z
# Bad Seeing Conditions
draw_sv(215,220,30,40) # NGC, 215 < α < 220, 30 < δ < 40 40 BASS+MzLS AGES
# Strong Dust Extinction
draw_sv(140,150,65,70) # NGC, 140 < α < 150, 65 < δ < 70 19 BASS+MzLS
draw_sv(240,245,20,25) # NGC, 240 < α < 245, 20 < δ < 25 23 DECaLS
draw_sv(345,350,20,25) # SGC, 345 < α < 350, 20 < δ < 25 23 DECaLS
# High Stellar Density
draw_sv(200,210,5,10) # NGC, 200 < α < 210, 5 < δ < 10 49 DECaLS Sagittarius Stream
# High Stellar Density and Strong Dust Extinction
draw_sv(273,283,40,45) # NGC, 273 < α < 283, 40 < δ < 45 37 BASS+MzLS Close to Galactic Plane
draw_sv(260,270,15,20) # NGC, 260 < α < 270, 15 < δ < 20 47 DECaLS Close to Galactic Plane
draw_sv(332,340,25,30) # SGC, 332 < α < 340, 25 < δ < 30 36 DECaLS Close to Galactic Plane
nside = 1024
event = revtab[revtab['EVENTID']=='134191_17593623']
run_c = event['RUNID'][0]
evt_c = event['EVENT'][0]
ra_c = event['RA'][0]
dec_c = event['DEC'][0]
date_c = event['TIME'][0]
vec_c = hp.ang2vec(ra_c, dec_c, lonlat=True)
err50_c = event['ERR50'].to('radian').value[0]
err90_c = event['ERR90'].to('radian').value[0]
pix90 = hp.query_disc(nside, vec=vec_c, radius=err90_c, nest=True)
# Load sample of resolved DR9 target catalog using HEALPixels in the 90% C.I.
hpdirnames = ['/global/project/projectdirs/desi/target/catalogs/dr9m/0.44.0/targets/main/resolve/bright',
'/global/project/projectdirs/desi/target/catalogs/dr9m/0.44.0/targets/main/resolve/dark']
readcols = ['TARGETID', 'BRICKID', 'BRICKNAME', 'BRICK_OBJID',
'RA', 'DEC', 'PMRA', 'PMDEC', 'REF_EPOCH',
'DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET',
'FLUX_G', 'FLUX_R', 'FLUX_Z',
'FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z']
targlist90 = None
for hpdirname in hpdirnames:
if targlist90 is None:
targlist90 = Table(read_targets_in_hp(hpdirname, nside=nside, pixlist=pix90, columns=readcols))
else:
targlist90 = vstack(targlist90, Table(read_targets_in_hp(hpdirname, nside=nside, pixlist=pix90, columns=readcols)))
targlist90 = unique(targlist90)
ra_c, dec_c, err50_c, err90_c
targlist90
ra90, dec90 = [targlist90[_] for _ in ['RA', 'DEC']]
xmin = np.round(ra_c - 2.5, decimals=1)
xmax = np.round(ra_c + 2.5, decimals=1)
# xmin = angle_to_180(np.round(ra_c - 5))
# xmax = angle_to_180(np.round(ra_c + 5))
if xmax < xmin:
xmin, xmax = xmax, xmin
cxmin, cxmax = xmin, xmax
frot = 0.
if xmax > 90 and xmin < -90:
    frot, cxmin, cxmax = 180., xmax-180., xmax+180.
ymin = np.round(dec_c - 2.5, decimals=1)
ymax = np.round(dec_c + 2.5, decimals=1)
faspect = np.abs(cxmax - cxmin)/np.abs(ymax-ymin)
fysize = 4
figsize = (fysize*faspect+2, fysize+2.75)
figsize
fig, ax = plt.subplots(1,1, num=1, figsize=figsize)
for _radius, _lstyle, _clev in zip([err50_c, err90_c], ['--', '-'], ['50', '90']):
cont_ra, cont_dec = [], []
_r = np.degrees(_radius)
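    # Trace the confidence contour as a circle of radius _r (in degrees) around the best-fit position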
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(ra_c + _r*np.cos(phi))
cont_dec.append(dec_c + _r*np.sin(phi))
ax.plot(cont_ra, cont_dec, 'g', ls=_lstyle, label='{}% CI'.format(_clev))
ratarg, dectarg = ra90, dec90
ratarg[ratarg < 0] += 360
ax.plot(ratarg, dectarg, 'k,', alpha=0.3)
ax.set(xlim=(cxmax, cxmin),
xlabel='RA [deg]',
ylabel='Dec [deg]',
ylim=(ymin, ymax),
title='{} {}'.format(run_c, evt_c),
aspect='equal')
ax.grid(ls=':')
circ = plt.Circle((ra_c, dec_c), radius=1.6, fc='None', ec='k', ls='--')
ax.add_artist(circ)
_h, _l = ax.get_legend_handles_labels()
_h.append(circ)
_l.append('DESI FOV')
ax.legend(handles=_h, labels=_l, fontsize=12, ncol=3)
# # cb = fig.colorbar(img, orientation='horizontal', shrink=0.85, fraction=0.1,
# # pad=0.1, ax=ax)
# # cb.set_label(r'GW angular posterior Pr$(\alpha,\delta)$')
fig.savefig('targlist.png')
select_targets = (targlist90['BGS_TARGET'] & bgs_mask.mask('BGS_BRIGHT')) != 0
ra90, dec90 = [targlist90[_][select_targets] for _ in ['RA', 'DEC']]
len(ra90)
fig, ax = plt.subplots(1,1, num=1, figsize=figsize)
fig.patch.set_alpha(0)
for _radius, _lstyle, _clev in zip([err50_c, err90_c], ['--', '-'], ['50', '90']):
cont_ra, cont_dec = [], []
_r = np.degrees(_radius)
for phi in np.linspace(0, 2*np.pi, 41):
cont_ra.append(ra_c + _r*np.cos(phi))
cont_dec.append(dec_c + _r*np.sin(phi))
ax.plot(cont_ra, cont_dec, 'g', ls=_lstyle, label='{}% CI'.format(_clev), lw=2)
ratarg, dectarg = ra90, dec90
ratarg[ratarg < 0] += 360
ax.plot(ratarg, dectarg, 'k.', alpha=0.05)
ax.set(xlim=(145.5, 140.5),
xlabel='RA [deg]',
ylabel='Dec [deg]',
ylim=(1, 6),
title='Run {}, Event {}\n{}'.format(run_c, evt_c, date_c),
aspect='equal')
ax.grid(ls=':')
circ = plt.Circle((ra_c, dec_c), radius=1.6, fc='None', ec='b', ls='--', lw=2)
ax.add_artist(circ)
_h, _l = ax.get_legend_handles_labels()
_h.append(circ)
_l.append('DESI FOV')
ax.legend(handles=_h, labels=_l, fontsize=12, ncol=3)
# # cb = fig.colorbar(img, orientation='horizontal', shrink=0.85, fraction=0.1,
# # pad=0.1, ax=ax)
# # cb.set_label(r'GW angular posterior Pr$(\alpha,\delta)$')
fig.savefig('targlist_bgs.png', dpi=100)
output = targlist90[select_targets]['RA', 'DEC', 'PMRA', 'PMDEC', 'REF_EPOCH']
output['OVERRIDE'] = np.full_like(output['RA'], False, dtype=bool)
output.write('nu_IC134191-17593623_dr9m_sec_targets.txt', format='ascii', overwrite=True)
output.write('nu_IC134191-17593623_dr9m_sec_targets.fits', format='fits', overwrite=True)
| 0.671794 | 0.858185 |
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
import os
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
purchase_data.head()
```
## Player Count
* Display the total number of players
```
#total_players = purchase_data.loc[:,["SN"]]
#number_of_players = total_players.count()
#number_of_players
total_players= len(purchase_data["SN"].value_counts())
total_players_df=pd.DataFrame({"Total Players": [total_players]})
total_players_df
```
## Purchasing Analysis (Total)
* Run basic calculations to obtain number of unique items, average price, etc.
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
unique_items = len(purchase_data["Item ID"].unique())
average_price = round(purchase_data["Price"].mean(),2)
total_purchases = len(purchase_data["Purchase ID"])
total_revenue = purchase_data["Price"].sum()
summary_df = pd.DataFrame({"Number of Unique Items": [unique_items],
"Average Price": [average_price],
"Number of Purchases": [total_purchases],
"Total Revenue" : [total_revenue]})
summary_df['Number of Unique Items'] = summary_df['Number of Unique Items'].map("{:,}".format)
summary_df['Average Price'] = summary_df['Average Price'].map("${:,.2f}".format)
summary_df['Total Revenue'] = summary_df['Total Revenue'].map("${:,.2f}".format)
summary_df
# add in style dictionary with format
```
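The closing comment above hints at a style dictionary; as an optional sketch (not from the original notebook, and assuming a pandas version with the `DataFrame.style` accessor), the summary could instead be kept numeric and formatted only at display time:
```
# Sketch: keep the summary numeric and apply formatting only for display
summary_numeric = pd.DataFrame({"Number of Unique Items": [unique_items],
                                "Average Price": [average_price],
                                "Number of Purchases": [total_purchases],
                                "Total Revenue": [total_revenue]})
summary_numeric.style.format({"Number of Unique Items": "{:,}",
                              "Number of Purchases": "{:,}",
                              "Average Price": "${:,.2f}",
                              "Total Revenue": "${:,.2f}"})
```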
## Gender Demographics
* Percentage and Count of Male Players
* Percentage and Count of Female Players
* Percentage and Count of Other / Non-Disclosed
```
# summary table with Male/Female & counts, then percentage = total
gender_totals = purchase_data.groupby("Gender")["SN"].nunique()
gender_percent = round((gender_totals / total_players) * 100,2)
gender_df = pd.DataFrame({"Total Count":gender_totals,
"Percentage of Players": gender_percent})
gender_df['Percentage of Players'] = gender_df['Percentage of Players'].map("{:.2f}%".format)
gender_df
```
## Purchasing Analysis (Gender)
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
gender_purchase_count = purchase_data.groupby(["Gender"]).count()["Price"].rename('Purchase Count')
#gender_purchase_count
average_purchase_price = round(purchase_data.groupby(['Gender']).mean()['Price'], 2).rename('Average Purchase Price')
#Purchase_Count = purchase_data_df.groupby(["Gender"]).count()["Purchase ID"].rename("Purchase Count")
grouped_total_price = purchase_data.groupby(["Gender"]).sum()["Price"].rename("Total Purchase Value")
per_person_total = grouped_total_price /gender_df["Total Count"] #price/mean
#purchase total/sum
gender_analysis = {'Purchase Count':gender_purchase_count,
'Average Purchase Price':average_purchase_price,
'Total Purchase Value':grouped_total_price,
'Avg Total Purchase per Person':per_person_total}
#Create the Gender Demographics DataFrame
gender_analysis_df = pd.DataFrame(gender_analysis)
# Format data to 1,000
gender_analysis_df['Average Purchase Price'] = gender_analysis_df['Average Purchase Price'].map("${:,.2f}".format)
gender_analysis_df['Total Purchase Value'] = gender_analysis_df['Total Purchase Value'].map("${:,.2f}".format)
gender_analysis_df['Avg Total Purchase per Person'] = gender_analysis_df['Avg Total Purchase per Person'].map("${:,.2f}".format)
gender_analysis_df
```
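As an optional alternative sketch (assuming pandas 0.25 or newer for named aggregation, and reusing `purchase_data` and `gender_totals` from above), the same per-gender summary can be computed in a single `groupby().agg()` pass:
```
# Sketch: one groupby pass instead of three separate groupby calls
gender_summary = (purchase_data.groupby("Gender")["Price"]
                  .agg(purchase_count="count",
                       average_purchase_price="mean",
                       total_purchase_value="sum"))
# Divide by the number of unique players per gender computed earlier
gender_summary["avg_total_per_person"] = (
    gender_summary["total_purchase_value"] / gender_totals)
gender_summary
```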
## Age Demographics
* Establish bins for ages
* Categorize the existing players using the age bins. Hint: use pd.cut()
* Calculate the numbers and percentages by age group
* Create a summary data frame to hold the results
* Optional: round the percentage column to two decimal points
* Display Age Demographics Table
```
# AGE DEMOGRAPHICS
#Establish bins for ages
# Categorize the existing players using the age bins. Hint: use pd.cut()
# Calculate the numbers and percentages by age group
# Create a summary data frame to hold the results
# Optional: round the percentage column to two decimal points
# Display Age Demographics Table
purchase_data2= purchase_data.loc[:, ["SN", "Age"]]
# remove duplicates
cleaned_purchase_data = purchase_data2.drop_duplicates()
# create the bins in which the data will be held
bins = [0,9,14,19,24,29,34,39,200]
#print(len(bins))
# Create the names for the bins
age_groups = ['<10','10-14','15-19','20-24','25-29','30-34','35-39','40+']
#print(len(age_groups))
# create a group based off the bins
cleaned_purchase_data['Age'] = pd.cut(cleaned_purchase_data['Age'], bins, labels= age_groups, include_lowest = True)
age_groups_count = cleaned_purchase_data.groupby(["Age"]).count()["SN"].rename("Total Count")
percent_age_group = age_groups_count / int(total_players) * 100
# create dicitonary of lists
grouped_age = {'Total Count':age_groups_count,
'Percentage of Players':percent_age_group,}
# create gd df
gender_demographics_df = pd.DataFrame(grouped_age)
# format data
gender_demographics_df['Percentage of Players'] = gender_demographics_df['Percentage of Players'].map("{:.2f}%".format)
gender_demographics_df
```
## Purchasing Analysis (Age)
* Bin the purchase_data data frame by age
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
# bin the purchase_data dataframe by age
# find purchase count
# avg. purchase price, avg. purchase total per person etc.
# create summary df
# format
purchase_data_df3 = purchase_data.loc[:, ["Age","Price","SN","Purchase ID"]]
# Create the bins in which Data will be held
bins = [0, 9, 14, 19, 24, 29, 34, 39, 500]
#print(len(bins))
# Create the names for the bins
age_groups = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
#print(len(age_groups))
# Creating a group based off of the bins
purchase_data_df3["Age"] = pd.cut(purchase_data_df3["Age"], bins, labels=age_groups, include_lowest=True)
purchased_age_count = purchase_data_df3.groupby(["Age"]).count()["Purchase ID"].rename("Total Count")
avg_purchase_price = purchase_data_df3.groupby(["Age"]).mean()["Price"].rename("Average Purchase Price")
total_purchase_value = purchase_data_df3.groupby(["Age"]).sum()["Price"].rename("Total Purchase Value")
total_purchase_value
avg_purchase_total_per_person = total_purchase_value / age_groups_count  # divide by unique players per age group
# create dictionary of lists
age_purchase_analysis = {'Purchase Count' : purchased_age_count,
'Average Purchase Price' : avg_purchase_price,
'Total Purchase Value' : total_purchase_value,
'Avg Total Purchase per Person' : avg_purchase_total_per_person}
purchasing_analysis_age_df = pd.DataFrame(age_purchase_analysis)
purchasing_analysis_age_df['Average Purchase Price'] = purchasing_analysis_age_df['Average Purchase Price'].map("${:,.2f}".format)
purchasing_analysis_age_df['Total Purchase Value'] = purchasing_analysis_age_df['Total Purchase Value'].map("${:,.2f}".format)
purchasing_analysis_age_df['Avg Total Purchase per Person'] = purchasing_analysis_age_df['Avg Total Purchase per Person'].map("${:,.2f}".format)
purchasing_analysis_age_df
```
## Top Spenders
* Run basic calculations to obtain the results in the table below
* Create a summary data frame to hold the results
* Sort the total purchase value column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
```
SN_purchase_data = purchase_data.loc[:, ["SN","Price","Purchase ID"]]
# Reference multiple columns within a DataFrame
grouped_SN_price = purchase_data.groupby(["SN"]).mean()["Price"].rename("Average Purchase Price")
grouped_SN_pur_id = purchase_data.groupby(["SN"]).count()["Purchase ID"].rename("Purchase Count")
grouped_SN_total_price = purchase_data.groupby(["SN"]).sum()["Price"].rename("Total Purchase Value")
# Initialize Dictionary of lists
Top_Spender_Analysis = {'Purchase Count':grouped_SN_pur_id,
'Average Purchase Price': grouped_SN_price,
'Total Purchase Value':grouped_SN_total_price,}
# Create the SN Spender DataFrame
Top_Spender_df = pd.DataFrame(Top_Spender_Analysis)
# Sort by Total Purchase Value
Top_Spender_df=Top_Spender_df.sort_values(['Total Purchase Value'], ascending=False)
# Format data to $1,000.00
Top_Spender_df['Average Purchase Price'] = Top_Spender_df['Average Purchase Price'].map("${:,.2f}".format)
Top_Spender_df['Total Purchase Value'] = Top_Spender_df['Total Purchase Value'].map("${:,.2f}".format)
#Show Data Frame
Top_Spender_df.head(5)
```
## Most Popular Items
* Retrieve the Item ID, Item Name, and Item Price columns
* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value
* Create a summary data frame to hold the results
* Sort the purchase count column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
```
#Item_Data_df
Item_Data_df = purchase_data.loc[:, ["Item ID","Item Name","Price"]]
# Group Item_Data_df by ["Item ID", "Item Name"]
Grouped_Item_df =Item_Data_df.groupby(["Item ID", "Item Name"])
# Calculations on groupby DF
item_ID_count = Grouped_Item_df.count()["Price"].rename("Purchase Count")
item_ID_price = Grouped_Item_df.mean()["Price"].rename("Item Price")
total_purchase_value = Grouped_Item_df.sum()["Price"].rename("Total Purchase Value")
# Initialize Dictionary of lists
Popular_Items_Analysis = {'Purchase Count':item_ID_count,
'Item Price': item_ID_price,
'Total Purchase Value': total_purchase_value}
# Create the SN Spender DataFrame
Most_Popular_Items_df = pd.DataFrame(Popular_Items_Analysis)
# Sort by Purchase Count
Most_Popular_Items_df=Most_Popular_Items_df.sort_values(['Purchase Count'], ascending=False)
# Format data to $1,000.00
Most_Popular_Items_df['Item Price'] = Most_Popular_Items_df['Item Price'].map("${:,.2f}".format)
Most_Popular_Items_df['Total Purchase Value'] = Most_Popular_Items_df['Total Purchase Value'].map("${:,.2f}".format)
#Show Data Frame
Most_Popular_Items_df.head()
```
## Most Profitable Items
* Sort the above table by total purchase value in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the data frame
```
# Create the SN Spender DataFrame
Most_Popular_Items_df = pd.DataFrame(Popular_Items_Analysis)
# Sort by Total Purchase Value
Most_Popular_Items_df=Most_Popular_Items_df.sort_values(['Total Purchase Value'], ascending=False)
# Format data to $1,000.00
Most_Popular_Items_df['Item Price'] = Most_Popular_Items_df['Item Price'].map("${:,.2f}".format)
Most_Popular_Items_df['Total Purchase Value'] = Most_Popular_Items_df['Total Purchase Value'].map("${:,.2f}".format)
#Show Data Frame
Most_Popular_Items_df.head()
```
|
github_jupyter
|
# Dependencies and Setup
import pandas as pd
import os
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
purchase_data.head()
#total_players = purchase_data.loc[:,["SN"]]
#number_of_players = total_players.count()
#number_of_players
total_players= len(purchase_data["SN"].value_counts())
total_players_df=pd.DataFrame({"Total Players": [total_players]})
total_players_df
unique_items = len(purchase_data["Item ID"].unique())
average_price = round(purchase_data["Price"].mean(),2)
total_purchases = len(purchase_data["Purchase ID"])
total_revenue = purchase_data["Price"].sum()
summary_df = pd.DataFrame({"Number of Unique Items": [unique_items],
"Average Price": [average_price],
"Number of Purchases": [total_purchases],
"Total Revenue" : [total_revenue]})
summary_df['Number of Unique Items'] = summary_df['Number of Unique Items'].map("${:,.2f}".format)
summary_df['Average Price'] = summary_df['Average Price'].map("${:,.2f}".format)
summary_df['Total Revenue'] = summary_df['Total Revenue'].map("${:,.2f}".format)
summary_df
# add in style dictionary with format
# summary table with Male/Female & counts, then percentage = total
gender_totals = purchase_data.groupby("Gender")["SN"].nunique()
gender_percent = round((gender_totals / total_players) * 100,2)
gender_df = pd.DataFrame({"Total Count":gender_totals,
"Percentage of Players": gender_percent})
gender_df['Percentage of Players'] = gender_df['Percentage of Players'].map("{:.2f}%".format)
gender_df
gender_purchase_count = purchase_data.groupby(["Gender"]).count()["Price"].rename('Purchase Count')
#gender_purchase_count
average_purchase_price = round(purchase_data.groupby(['Gender']).mean()['Price']).rename('Average Purchase Price')
#Purchase_Count = purchase_data_df.groupby(["Gender"]).count()["Purchase ID"].rename("Purchase Count")
grouped_total_price = purchase_data.groupby(["Gender"]).sum()["Price"].rename("Total Purchase Value")
per_person_total = grouped_total_price /gender_df["Total Count"] #price/mean
#purchase total/sum
gender_analysis = {'Purchase Count':gender_purchase_count,
'Average Purchase Price':average_purchase_price,
'Total Purchase Value':grouped_total_price,
'Avg Total Purchase per Person':per_person_total}
#Create the Gender Demographics DataFrame
gender_analysis_df = pd.DataFrame(gender_analysis)
# Format data to 1,000
gender_analysis_df['Average Purchase Price'] = gender_analysis_df['Average Purchase Price'].map("${:,.2f}".format)
gender_analysis_df['Total Purchase Value'] = gender_analysis_df['Total Purchase Value'].map("${:,.2f}".format)
gender_analysis_df['Avg Total Purchase per Person'] = gender_analysis_df['Avg Total Purchase per Person'].map("${:,.2f}".format)
gender_analysis_df
# AGE DEMOGRAPHICS
#Establish bins for ages
# Categorize the existing players using the age bins. Hint: use pd.cut()
# Calculate the numbers and percentages by age group
# Create a summary data frame to hold the results
# Optional: round the percentage column to two decimal points
# Display Age Demographics Table
purchase_data2= purchase_data.loc[:, ["SN", "Age"]]
# remove duplicates
cleaned_purchase_data = purchase_data2.drop_duplicates()
# create the bins in which the data will be held
bins = [0,9,14,19,24,29,34,39,200]
#print(len(bins))
# Create the names for the bins
age_groups = ['<10','10-14','15-19','20-24','25-29','30-34','35-39','40+']
#print(len(age_groups))
# create a group based off the bins
cleaned_purchase_data['Age'] = pd.cut(cleaned_purchase_data['Age'], bins, labels= age_groups, include_lowest = True)
age_groups_count = cleaned_purchase_data.groupby(["Age"]).count()["SN"].rename("Total Count")
percent_age_group = age_groups_count / int(total_players) * 100
# create dicitonary of lists
grouped_age = {'Total Count':age_groups_count,
'Percentage of Players':percent_age_group,}
# create gd df
gender_demographics_df = pd.DataFrame(grouped_age)
# format data
gender_demographics_df['Percentage of Players'] = gender_demographics_df['Percentage of Players'].map("{:.2f}%".format)
gender_demographics_df
# bin the purchase_data dataframe by age
# find purchase count
# avg. purchase price, avg. purchase total per person etc.
# create summary df
# format
purchase_data_df3 = purchase_data.loc[:, ["Age","Price","SN","Purchase ID"]]
# Create the bins in which Data will be held
bins = [0, 9, 14, 19, 24, 29, 34, 39, 500]
#print(len(bins))
# Create the names for the bins
age_groups = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
#print(len(age_groups))
# Creating a group based off of the bins
purchase_data_df3["Age"] = pd.cut(purchase_data_df3["Age"], bins, labels=age_groups, include_lowest=True)
purchased_age_count = purchase_data_df3.groupby(["Age"]).count()["Purchase ID"].rename("Total Count")
avg_purchase_price = purchase_data_df3.groupby(["Age"]).mean()["Price"].rename("Average Purchase Price")
total_purchase_value = purchase_data_df3.groupby(["Age"]).sum()["Price"].rename("Total Purchase Value")
total_purchase_value
avg_purchase_total_per_person = total_purchase_value / age_groups_count  # divide by unique players per age group
# create dictionary of lists
age_purchase_analysis = {'Purchase Count' : purchased_age_count,
'Average Purchase Price' : avg_purchase_price,
'Total Purchase Value' : total_purchase_value,
'Avg Total Purchase per Person' : avg_purchase_total_per_person}
purchasing_analysis_age_df = pd.DataFrame(age_purchase_analysis)
purchasing_analysis_age_df['Average Purchase Price'] = purchasing_analysis_age_df['Average Purchase Price'].map("${:,.2f}".format)
purchasing_analysis_age_df['Total Purchase Value'] = purchasing_analysis_age_df['Total Purchase Value'].map("${:,.2f}".format)
purchasing_analysis_age_df['Avg Total Purchase per Person'] = purchasing_analysis_age_df['Avg Total Purchase per Person'].map("${:,.2f}".format)
purchasing_analysis_age_df
SN_purchase_data = purchase_data.loc[:, ["SN","Price","Purchase ID"]]
# Reference multiple columns within a DataFrame
grouped_SN_price = purchase_data.groupby(["SN"]).mean()["Price"].rename("Average Purchase Price")
grouped_SN_pur_id = purchase_data.groupby(["SN"]).count()["Purchase ID"].rename("Purchase Count")
grouped_SN_total_price = purchase_data.groupby(["SN"]).sum()["Price"].rename("Total Purchase Value")
# Initialize Dictionary of lists
Top_Spender_Analysis = {'Purchase Count':grouped_SN_pur_id,
'Average Purchase Price': grouped_SN_price,
'Total Purchase Value':grouped_SN_total_price,}
# Create the SN Spender DataFrame
Top_Spender_df = pd.DataFrame(Top_Spender_Analysis)
# Sort by Total Purchase Value
Top_Spender_df=Top_Spender_df.sort_values(['Total Purchase Value'], ascending=False)
# Format data to $1,000.00
Top_Spender_df['Average Purchase Price'] = Top_Spender_df['Average Purchase Price'].map("${:,.2f}".format)
Top_Spender_df['Total Purchase Value'] = Top_Spender_df['Total Purchase Value'].map("${:,.2f}".format)
#Show Data Frame
Top_Spender_df.head(5)
#Item_Data_df
Item_Data_df = purchase_data.loc[:, ["Item ID","Item Name","Price"]]
# Group Item_Data_df by ["Item ID", "Item Name"]
Grouped_Item_df =Item_Data_df.groupby(["Item ID", "Item Name"])
# Calculations on groupby DF
item_ID_count = Grouped_Item_df.count()["Price"].rename("Purchase Count")
item_ID_price = Grouped_Item_df.mean()["Price"].rename("Item Price")
total_purchase_value = Grouped_Item_df.sum()["Price"].rename("Total Purchase Value")
# Initialize Dictionary of lists
Popular_Items_Analysis = {'Purchase Count':item_ID_count,
'Item Price': item_ID_price,
'Total Purchase Value': total_purchase_value}
# Create the SN Spender DataFrame
Most_Popular_Items_df = pd.DataFrame(Popular_Items_Analysis)
# Sort by Purchase Count
Most_Popular_Items_df=Most_Popular_Items_df.sort_values(['Purchase Count'], ascending=False)
# Format data to $1,000.00
Most_Popular_Items_df['Item Price'] = Most_Popular_Items_df['Item Price'].map("${:,.2f}".format)
Most_Popular_Items_df['Total Purchase Value'] = Most_Popular_Items_df['Total Purchase Value'].map("${:,.2f}".format)
#Show Data Frame
Most_Popular_Items_df.head()
# Create the SN Spender DataFrame
Most_Popular_Items_df = pd.DataFrame(Popular_Items_Analysis)
# Sort by Total Purchase Value
Most_Popular_Items_df=Most_Popular_Items_df.sort_values(['Total Purchase Value'], ascending=False)
# Format data to $1,000.00
Most_Popular_Items_df['Item Price'] = Most_Popular_Items_df['Item Price'].map("${:,.2f}".format)
Most_Popular_Items_df['Total Purchase Value'] = Most_Popular_Items_df['Total Purchase Value'].map("${:,.2f}".format)
#Show Data Frame
Most_Popular_Items_df.head()
| 0.407216 | 0.76105 |
[](http://rpi.analyticsdojo.com)
<center><h1>Introduction to R - Tidyverse </h1></center>
<center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center>
## Overview
> It is often said that 80% of data analysis is spent on the process of cleaning and preparing the data. (Dasu and Johnson, 2003)
Thus before you can even get to doing any sort of sophisticated analysis or plotting, you'll generally first need to:
1. ***Manipulate*** data frames, e.g. by filtering, summarizing, and conducting calculations across groups.
2. ***Tidy*** data into the appropriate format
# What is the Tidyverse?
## Tidyverse
- "The tidyverse is a set of packages that work in harmony because they share common data representations and API design." -Hadley Wickham
- The suite of packages includes `dplyr`, `tibble`, `tidyr`, `readr`, `purrr` (and more).

- From [R for Data Science](http://r4ds.had.co.nz/explore-intro.html) by [Hadley Wickham](https://github.com/hadley)
## Schools of Thought
There are two competing schools of thought within the R community.
* We should stick to base R functions for manipulating and tidying; `tidyverse` uses syntax that's unlike base R and is superfluous.
* We should start teaching students to manipulate data using `tidyverse` tools because they are straightforward to use, more readable than base R, and speed up the tidying process.
We'll show you some of the `tidyverse` tools so you can make an informed decision about whether you want to use base R or these newfangled packages.
## Dataframe Manipulation using Base R Functions
- So far, you’ve seen the basics of manipulating data frames, e.g. subsetting, merging, and basic calculations.
- For instance, we can use base R functions to calculate summary statistics across groups of observations,
- e.g. the mean GDP per capita within each region:
```
gapminder <- read.csv("../../input/gapminder-FiveYearData.csv",
stringsAsFactors = TRUE)
head(gapminder)
```
## But the base R approach isn't ideal because it involves a fair bit of repetition. Repeating yourself will cost you time, both now and later, and potentially introduce some nasty bugs.
# Dataframe Manipulation using dplyr
Here we're going to cover 6 of the most commonly used functions as well as using pipes (`%>%`) to combine them.
1. `select()`
2. `filter()`
3. `group_by()`
4. `summarize()`
5. `mutate()`
6. `arrange()`
If you have have not installed this package earlier, please do so now:
```r
install.packages('dplyr')
```
## Dataframe Manipulation using `dplyr`
Luckily, the [`dplyr`](https://cran.r-project.org/web/packages/dplyr/dplyr.pdf) package provides a number of very useful functions for manipulating dataframes. These functions will save you time by reducing repetition. As an added bonus, you might even find the `dplyr` grammar easier to read.
- ["A fast, consistent tool for working with data frame like objects, both in memory and out of memory."](https://cran.r-project.org/web/packages/dplyr/index.html)
- Subset observations using their value with `filter()`.
- Reorder rows using `arrange()`.
- Select columns using `select()`.
- Recode variables using `mutate()`.
- Summarize variables using `summarise()`.
```
#Now lets load some packages:
library(dplyr)
library(ggplot2)
library(tidyverse)
```
# dplyr select
Imagine that we just received the gapminder dataset, but are only interested in a few variables in it. We could use the `select()` function to keep only the columns corresponding to variables we select.
```
year_country_gdp <-gapminder[,c("year","country")]
year_country_gdp
year_country_gdp <- select(gapminder, year, country, gdpPercap)
head(year_country_gdp)
```
## dplyr Piping
- `%>%` is used to help write cleaner code.
- It is loaded by default when running the `tidyverse`, but it comes from the `magrittr` package.
- Input from one command is piped to another without saving directly in memory with an intermediate throwaway variable.
- Since the pipe grammar is unlike anything we've seen in R before, let's repeat what we've done above using pipes.
```
year_country_gdp <- gapminder %>% select(year,country,gdpPercap)
```
## dplyr filter
Now let's say we're only interested in African countries. We can combine `select` and `filter` to select only the observations where `continent` is `Africa`.
As with last time, first we pass the gapminder dataframe to the `filter()` function, then we pass the filtered version of the gapminder dataframe to the `select()` function.
To clarify, both the `select` and `filter` functions subset the data frame. The difference is that `select` extracts certain *columns*, while `filter` extracts certain *rows*.
**Note:** The order of operations is very important in this case. If we used 'select' first, filter would not be able to find the variable `continent` since we would have removed it in the previous step.
```
year_country_gdp_africa <- gapminder %>%
filter(continent == "Africa") %>%
select(year,country,gdpPercap)
```
## dplyr Calculations Across Groups
A common task you'll encounter when working with data is running calculations on different groups within the data. For instance, what if we wanted to calculate the mean GDP per capita for each continent?
In base R, you would have to run the `mean()` function for each subset of data:
```
mean(gapminder[gapminder$continent == "Africa", "gdpPercap"])
mean(gapminder[gapminder$continent == "Americas", "gdpPercap"])
mean(gapminder[gapminder$continent == "Asia", "gdpPercap"])
```
# dplyr split-apply-combine
The abstract problem we're encountering here is known as "split-apply-combine":

We want to *split* our data into groups (in this case continents), *apply* some calculations on each group, then *combine* the results together afterwards.
Module 4 gave some ways to do split-apply-combine type stuff using the `apply` family of functions, but those are error prone and messy.
Luckily, `dplyr` offers a much cleaner, straight-forward solution to this problem.
```r
# remove this column -- there are two easy ways!
```
## dplyr group_by
We've already seen how `filter()` can help us select observations that meet certain criteria (in the above: `continent == "Africa"`). More helpful, however, is the `group_by()` function, which will essentially use every unique criterion that we could have used in `filter()`.
A `grouped_df` can be thought of as a `list` where each item in the `list` is a `data.frame` which contains only the rows that correspond to a particular value of `continent` (at least in the example above).

```
#Summarize returns a dataframe.
gdp_bycontinents <- gapminder %>%
group_by(continent) %>%
summarize(mean_gdpPercap = mean(gdpPercap))
head(gdp_bycontinents)
```

That allowed us to calculate the mean gdpPercap for each continent. But it gets even better -- the function `group_by()` allows us to group by multiple variables. Let's group by `year` and `continent`.
```
gdp_bycontinents_byyear <- gapminder %>%
group_by(continent, year) %>%
summarize(mean_gdpPercap = mean(gdpPercap))
gdp_bycontinents_byyear
mpg<-mpg
str(mpg)
```
### That is already quite powerful, but it gets even better! You're not limited to defining 1 new variable in `summarize()`.
```
gdp_pop_bycontinents_byyear <- gapminder %>%
group_by(continent, year) %>%
summarize(mean_gdpPercap = mean(gdpPercap),
sd_gdpPercap = sd(gdpPercap),
mean_pop = mean(pop),
sd_pop = sd(pop))
head(gdp_pop_bycontinents_byyear)
```
## Basics
- Use the mpg dataset to create summaries by manufacturer/year for 8 cyl vehicles.
```
mpg<-mpg
head(mpg)
#This just gives a dataframe with 70 obs, only 8 cylinder cars
mpg.8cyl<-mpg %>%
filter(cyl == 8)
mpg.8cyl
#Filter to only those cars that have 8 cylinders
mpg.8cyl<-mpg %>%
filter(cyl == 8)
#Alt Syntax
mpg.8cyl<-filter(mpg, cyl == 8)
mpg.8cyl
#Sort cars by MPG highway(hwy) then city(cty)
mpgsort<-arrange(mpg, hwy, cty)
mpgsort
#From the documentation https://cran.r-project.org/web/packages/dplyr/dplyr.pdf
select(iris, starts_with("petal")) #returns columns that start with "Petal"
select(iris, ends_with("width")) #returns columns that end with "Width"
select(iris, contains("etal"))
select(iris, matches(".t."))
select(iris, Petal.Length, Petal.Width)
vars <- c("Petal.Length", "Petal.Width")
select(iris, one_of(vars))
#Recoding Data
# See Creating new variables with mutate and ifelse:
# https://rstudio-pubs-static.s3.amazonaws.com/116317_e6922e81e72e4e3f83995485ce686c14.html
mutate(mpg, displ_l = displ / 61.0237)
# Example taken from David Ranzolin
# https://rstudio-pubs-static.s3.amazonaws.com/116317_e6922e81e72e4e3f83995485ce686c14.html#/9
section <- c("MATH111", "MATH111", "ENG111")
grade <- c(78, 93, 56)
student <- c("David", "Kristina", "Mycroft")
gradebook <- data.frame(section, grade, student)
#As the output is a tibble, here we are saving each intermediate version.
gradebook2<-mutate(gradebook, Pass.Fail = ifelse(grade > 60, "Pass", "Fail"))
gradebook3<-mutate(gradebook2, letter = ifelse(grade %in% 60:69, "D",
ifelse(grade %in% 70:79, "C",
ifelse(grade %in% 80:89, "B",
ifelse(grade %in% 90:99, "A", "F")))))
gradebook3
#Here we are using piping to do this more effectively.
gradebook4<-gradebook %>%
mutate(Pass.Fail = ifelse(grade > 60, "Pass", "Fail")) %>%
mutate(letter = ifelse(grade %in% 60:69, "D",
ifelse(grade %in% 70:79, "C",
ifelse(grade %in% 80:89, "B",
ifelse(grade %in% 90:99, "A", "F")))))
gradebook4
#find the average city and highway mpg
summarise(mpg, mean(cty), mean(hwy))
#find the average city and highway mpg by cylinder
summarise(group_by(mpg, cyl), mean(cty), mean(hwy))
summarise(group_by(mtcars, cyl), m = mean(disp), sd = sd(disp))
# With data frames, you can create and immediately use summaries
by_cyl <- mtcars %>% group_by(cyl)
by_cyl %>% summarise(a = n(), b = a + 1)
```
#This was adapted from the Berkeley R Bootcamp.
|
github_jupyter
|
gapminder <- read.csv("../../input/gapminder-FiveYearData.csv",
stringsAsFactors = TRUE)
head(gapminder)
install.packages('dplyr')
#Now lets load some packages:
library(dplyr)
library(ggplot2)
library(tidyverse)
year_country_gdp <-gapminder[,c("year","country")]
year_country_gdp
year_country_gdp <- select(gapminder, year, country, gdpPercap)
head(year_country_gdp)
year_country_gdp <- gapminder %>% select(year,country,gdpPercap)
year_country_gdp_africa <- gapminder %>%
filter(continent == "Africa") %>%
select(year,country,gdpPercap)
mean(gapminder[gapminder$continent == "Africa", "gdpPercap"])
mean(gapminder[gapminder$continent == "Americas", "gdpPercap"])
mean(gapminder[gapminder$continent == "Asia", "gdpPercap"])
# remove this column -- there are two easy ways!
#Summarize returns a dataframe.
gdp_bycontinents <- gapminder %>%
group_by(continent) %>%
summarize(mean_gdpPercap = mean(gdpPercap))
head(gdp_bycontinents)
gdp_bycontinents_byyear <- gapminder %>%
group_by(continent, year) %>%
summarize(mean_gdpPercap = mean(gdpPercap))
gdp_bycontinents_byyear
mpg<-mpg
str(mpg)
gdp_pop_bycontinents_byyear <- gapminder %>%
group_by(continent, year) %>%
summarize(mean_gdpPercap = mean(gdpPercap),
sd_gdpPercap = sd(gdpPercap),
mean_pop = mean(pop),
sd_pop = sd(pop))
head(gdp_pop_bycontinents_byyear)
mpg<-mpg
head(mpg)
#This just gives a dataframe with 70 obs, only 8 cylinder cars
mpg.8cyl<-mpg %>%
filter(cyl == 8)
mpg.8cyl
#Filter to only those cars that have miles per gallon equal to
mpg.8cyl<-mpg %>%
filter(cyl == 8)
#Alt Syntax
mpg.8cyl<-filter(mpg, cyl == 8)
mpg.8cyl
#Sort cars by MPG highway(hwy) then city(cty)
mpgsort<-arrange(mpg, hwy, cty)
mpgsort
#From the documentation https://cran.r-project.org/web/packages/dplyr/dplyr.pdf
select(iris, starts_with("petal")) #returns columns that start with "Petal"
select(iris, ends_with("width")) #returns columns that start with "Width"
select(iris, contains("etal"))
select(iris, matches(".t."))
select(iris, Petal.Length, Petal.Width)
vars <- c("Petal.Length", "Petal.Width")
select(iris, one_of(vars))
#Recoding Data
# See Creating new variables with mutate and ifelse:
# https://rstudio-pubs-static.s3.amazonaws.com/116317_e6922e81e72e4e3f83995485ce686c14.html
mutate(mpg, displ_l = displ / 61.0237)
# Example taken from David Ranzolin
# https://rstudio-pubs-static.s3.amazonaws.com/116317_e6922e81e72e4e3f83995485ce686c14.html#/9
section <- c("MATH111", "MATH111", "ENG111")
grade <- c(78, 93, 56)
student <- c("David", "Kristina", "Mycroft")
gradebook <- data.frame(section, grade, student)
#As the output is a tibble, here we are saving each intermediate version.
gradebook2<-mutate(gradebook, Pass.Fail = ifelse(grade > 60, "Pass", "Fail"))
gradebook3<-mutate(gradebook2, letter = ifelse(grade %in% 60:69, "D",
ifelse(grade %in% 70:79, "C",
ifelse(grade %in% 80:89, "B",
ifelse(grade %in% 90:99, "A", "F")))))
gradebook3
#Here we are using piping to do this more effectively.
gradebook4<-gradebook %>%
mutate(Pass.Fail = ifelse(grade > 60, "Pass", "Fail")) %>%
mutate(letter = ifelse(grade %in% 60:69, "D",
ifelse(grade %in% 70:79, "C",
ifelse(grade %in% 80:89, "B",
ifelse(grade %in% 90:99, "A", "F")))))
gradebook4
#find the average city and highway mpg
summarise(mpg, mean(cty), mean(hwy))
#find the average city and highway mpg by cylander
summarise(group_by(mpg, cyl), mean(cty), mean(hwy))
summarise(group_by(mtcars, cyl), m = mean(disp), sd = sd(disp))
# With data frames, you can create and immediately use summaries
by_cyl <- mtcars %>% group_by(cyl)
by_cyl %>% summarise(a = n(), b = a + 1)
| 0.629319 | 0.977197 |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, multilabel_confusion_matrix, confusion_matrix
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.naive_bayes import MultinomialNB
import warnings
warnings.filterwarnings('ignore')
nltk.download('stopwords')
train_df = pd.read_csv("input.csv")
train_df.drop(columns=['Unnamed: 0'])
# yes = pd.read_csv("yes.csv")
# yes = yes["sequence"].to_list()
# no = pd.read_csv("no.csv")
# no = no["sequence"].to_list()
# neither = pd.read_csv("neither.csv")
# neither = neither["sequence"].to_list()
test_df = pd.read_csv("input_test.csv")
test_df.drop(columns=['Unnamed: 0'])
# train_yes = yes[:int(0.85 * len(yes))]
# test_yes = yes[int(0.85 * len(yes)):]
# train_no = no[:int(0.85 * len(no))]
# test_no = no[int(0.85 * len(no)):]
# train_neither = neither[:int(0.85 * len(neither))]
# test_neither = neither[int(0.85 * len(neither)):]
# train_x = train_yes + train_no + train_neither
# test_x = test_yes + test_no + test_neither
# train_y = np.append(np.ones((len(train_yes), 1)), np.zeros((len(train_neg), 1)), axis=0)
# test_y = np.append(np.ones((len(test_pos), 1)), np.zeros((len(test_neg), 1)), axis=0)
# Create a transformation pipeline
# The pipeline sequentially applies a list of transforms and as a final estimator logistic regression
pipeline_log = Pipeline([
('count', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(solver='lbfgs', multi_class='auto')),
])
# Train model using the created sklearn pipeline
model_name = 'logistic regression classifier'
model_lgr = pipeline_log.fit(train_df['sequence'], train_df['label'])
def evaluate_results(model, test_df):
# Predict class labels using the learner function
    test_df['pred'] = model.predict(test_df['sequence'])
y_true = test_df['label']
y_pred = test_df['pred']
target_names = ['NO', 'NTR', 'YES']
# Print the Confusion Matrix
results_log = classification_report(y_true, y_pred, target_names=target_names, output_dict=True)
results_df_log = pd.DataFrame(results_log).transpose()
print(results_df_log)
matrix = confusion_matrix(y_true, y_pred)
sns.heatmap(pd.DataFrame(matrix),
annot=True, fmt="d", linewidths=.5, cmap="YlGnBu")
plt.xlabel('Predictions')
    plt.ylabel('Actual')
    # precision_recall_fscore_support returns (precision, recall, f1, support)
    model_score = score(y_true, y_pred, average='macro')
return model_score
# Evaluate model performance
model_score = evaluate_results(model_lgr, test_df)
performance_df = pd.DataFrame().append({'model_name': model_name,
                                        'precision': model_score[0],
                                        'recall': model_score[1],
                                        'f1_score': model_score[2]}, ignore_index=True)
model_name = 'bayes classifier'
pipeline_bayes = Pipeline([
('count', CountVectorizer()),
('tfidf', TfidfTransformer()),
('gnb', MultinomialNB()),
])
# Train model using the created sklearn pipeline
model_bayes = pipeline_bayes.fit(train_df['sequence'], train_df['label'])
# Evaluate model performance
model_score = evaluate_results(model_bayes, test_df)
performance_df = performance_df.append({'model_name': model_name,
                                        'precision': model_score[0],
                                        'recall': model_score[1],
                                        'f1_score': model_score[2]}, ignore_index=True)
prediction_sequences = ['PHQ-2 Score: 0 Cognition Negative: no evidence of cognitive decline noted by patient or family; no memory problems causing dysfunction in daily activities Falls risk Time to rise from, walk 10 feet,',
'depression, but certainly does not appear depressed on exam - Dementia: MMSE on 5/21/16 23/30 c/w Mild cognitive impairment, which is NOT c/w profound weight loss - Gastroparesis: Hx of diabetes',
'THEY DO NOT HAVE DEMENTIA',
'tojguiegbhutrebjg bljtmhtnoery0og[wob erjbgt4iu5gbyi ]']
for seq in prediction_sequences:
ans = model_lgr.predict([seq])
d = {1: 'Negative', 2: 'Neither', 3: 'Positive'}
print(seq + '-> ' + d[ans[0]], "\n")
prediction_sequences = ['PHQ-2 Score: 0 Cognition Negative: no evidence of cognitive decline noted by patient or family; no memory problems causing dysfunction in daily activities Falls risk Time to rise from, walk 10 feet,',
'depression, but certainly does not appear depressed on exam - Dementia: MMSE on 5/21/16 23/30 c/w Mild cognitive impairment, which is NOT c/w profound weight loss - Gastroparesis: Hx of diabetes',
'THEY DO NOT HAVE DEMENTIA',
'tojguiegbhutrebjg bljtmhtnoery0og[wob erjbgt4iu5gbyi ]']
for seq in prediction_sequences:
ans = model_bayes.predict([seq])
d = {1: 'Negative', 2: 'Neither', 3: 'Positive'}
print(seq + '-> ' + d[ans[0]], "\n")
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, multilabel_confusion_matrix, confusion_matrix
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.naive_bayes import MultinomialNB
import warnings
warnings.filterwarnings('ignore')
nltk.download('stopwords')
train_df = pd.read_csv("input.csv")
train_df.drop(columns=['Unnamed: 0'])
# yes = pd.read_csv("yes.csv")
# yes = yes["sequence"].to_list()
# no = pd.read_csv("no.csv")
# no = no["sequence"].to_list()
# neither = pd.read_csv("neither.csv")
# neither = neither["sequence"].to_list()
test_df = pd.read_csv("input_test.csv")
test_df.drop(columns=['Unnamed: 0'])
# train_yes = yes[:int(0.85 * len(yes))]
# test_yes = yes[int(0.85 * len(yes)):]
# train_no = no[:int(0.85 * len(no))]
# test_no = no[int(0.85 * len(no)):]
# train_neither = neither[:int(0.85 * len(neither))]
# test_neither = neither[int(0.85 * len(neither)):]
# train_x = train_yes + train_no + train_neither
# test_x = test_yes + test_no + test_neither
# train_y = np.append(np.ones((len(train_yes), 1)), np.zeros((len(train_neg), 1)), axis=0)
# test_y = np.append(np.ones((len(test_pos), 1)), np.zeros((len(test_neg), 1)), axis=0)
# Create a transformation pipeline
# The pipeline sequentially applies a list of transforms and as a final estimator logistic regression
pipeline_log = Pipeline([
('count', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(solver='lbfgs', multi_class='auto')),
])
# Train model using the created sklearn pipeline
model_name = 'logistic regression classifier'
model_lgr = pipeline_log.fit(train_df['sequence'], train_df['label'])
def evaluate_results(model, test_df):
# Predict class labels using the learner function
    test_df['pred'] = model.predict(test_df['sequence'])
y_true = test_df['label']
y_pred = test_df['pred']
target_names = ['NO', 'NTR', 'YES']
# Print the Confusion Matrix
results_log = classification_report(y_true, y_pred, target_names=target_names, output_dict=True)
results_df_log = pd.DataFrame(results_log).transpose()
print(results_df_log)
matrix = confusion_matrix(y_true, y_pred)
sns.heatmap(pd.DataFrame(matrix),
annot=True, fmt="d", linewidths=.5, cmap="YlGnBu")
plt.xlabel('Predictions')
    plt.ylabel('Actual')
    # precision_recall_fscore_support returns (precision, recall, f1, support)
    model_score = score(y_true, y_pred, average='macro')
return model_score
# Evaluate model performance
model_score = evaluate_results(model_lgr, test_df)
performance_df = pd.DataFrame().append({'model_name': model_name,
                                        'precision': model_score[0],
                                        'recall': model_score[1],
                                        'f1_score': model_score[2]}, ignore_index=True)
model_name = 'bayes classifier'
pipeline_bayes = Pipeline([
('count', CountVectorizer()),
('tfidf', TfidfTransformer()),
('gnb', MultinomialNB()),
])
# Train model using the created sklearn pipeline
model_bayes = pipeline_bayes.fit(train_df['sequence'], train_df['label'])
# Evaluate model performance
model_score = evaluate_results(model_bayes, test_df)
performance_df = performance_df.append({'model_name': model_name,
                                        'precision': model_score[0],
                                        'recall': model_score[1],
                                        'f1_score': model_score[2]}, ignore_index=True)
prediction_sequences = ['PHQ-2 Score: 0 Cognition Negative: no evidence of cognitive decline noted by patient or family; no memory problems causing dysfunction in daily activities Falls risk Time to rise from, walk 10 feet,',
'depression, but certainly does not appear depressed on exam - Dementia: MMSE on 5/21/16 23/30 c/w Mild cognitive impairment, which is NOT c/w profound weight loss - Gastroparesis: Hx of diabetes',
'THEY DO NOT HAVE DEMENTIA',
'tojguiegbhutrebjg bljtmhtnoery0og[wob erjbgt4iu5gbyi ]']
for seq in prediction_sequences:
ans = model_lgr.predict([seq])
d = {1: 'Negative', 2: 'Neither', 3: 'Positive'}
print(seq + '-> ' + d[ans[0]], "\n")
prediction_sequences = ['PHQ-2 Score: 0 Cognition Negative: no evidence of cognitive decline noted by patient or family; no memory problems causing dysfunction in daily activities Falls risk Time to rise from, walk 10 feet,',
'depression, but certainly does not appear depressed on exam - Dementia: MMSE on 5/21/16 23/30 c/w Mild cognitive impairment, which is NOT c/w profound weight loss - Gastroparesis: Hx of diabetes',
'THEY DO NOT HAVE DEMENTIA',
'tojguiegbhutrebjg bljtmhtnoery0og[wob erjbgt4iu5gbyi ]']
for seq in prediction_sequences:
ans = model_bayes.predict([seq])
d = {1: 'Negative', 2: 'Neither', 3: 'Positive'}
print(seq + '-> ' + d[ans[0]], "\n")
| 0.645232 | 0.363195 |
# Heat Numba Serial
Numba Serial Implementation of the Test Problem
```
! python --version
import numba
print(numba.__version__)
import numpy
numpy.show_config()
```
### Main
```
%%writefile st-nu-seq.py
import numpy as np
from numba import jit, config, prange
from time import time
config.DUMP_ASSEMBLY = 0
config.NUMBA_ENABLE_AVX = 1
config.NUMBA_NUM_THREADS = 1
@jit('(float64[:,:],float64[:,:])', nopython=True, parallel=True, nogil=True)
def kernel_seq(anew, aold) :
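    # 5-point stencil (Jacobi step): each interior cell keeps half of its old value
    # and takes the other half from the average of its four nearest neighbours.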
anew[1:-1, 1:-1]=1/2.0*(aold[1:-1,1:-1]+1/4.0*(aold[2:,1:-1]+aold[:-2,1:-1]+aold[1:-1,2:]+aold[1:-1,:-2]))
n = 4800 # nxn grid (4800,1,500)=1500; (4800,1,5)=12
energy = 1.0 # energy to be injected per iteration
niters = 500 # number of iterations
nsources = 3 # sources of energy
size = n + 2
sizeEnd = n + 1
heat = np.zeros((1), np.float64) # system total heat
anew = np.zeros((size, size), np.float64)
aold = np.zeros((size, size), np.float64)
sources = np.empty((nsources, 2), np.int16)
sources[:,:] = [ [n//2, n//2], [n//3, n//3], [n*4//5, n*8//9] ]
niters = (niters + 1) // 2
t0 = time()
for iters in range(niters) :
kernel_seq(anew, aold)
for i in range(nsources) :
anew[sources[i, 0], sources[i, 1]] += energy
kernel_seq(aold, anew)
for i in range(nsources) :
aold[sources[i, 0], sources[i, 1]] += energy
heat[0] = np.sum( aold[1:-1, 1:-1] ) # system total heat
t0 = time() - t0
print("Heat = %0.4f | Tempo = %0.4f | Thread count = %s" %
(heat[0], t0, config.NUMBA_NUM_THREADS))
```
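One caveat worth flagging (an aside, not part of the original notebook): assigning to `config.NUMBA_NUM_THREADS` after `numba` has been imported may not change the worker count, depending on the Numba version and whether the threading layer has already started. The usual alternatives are exporting the `NUMBA_NUM_THREADS` environment variable before the import, or, on Numba 0.49 or newer, calling `numba.set_num_threads()`; a minimal sketch:
```
import numba

# Ask Numba's parallel backend for a single worker thread at runtime
numba.set_num_threads(1)
print(numba.get_num_threads())  # expected to report 1
```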
### Check
```
! python st-nu-seq.py
```
### Slurm script
```
%%writefile st-nu-seq.srm
#!/bin/bash
# queue limits (1.0 UA):
# cpu_dev  : 20 min.,  1-4 nodes,  1/1  tasks running/queued maximum
# cpu_small: 72 hours, 1-20 nodes, 16/96 tasks running/queued maximum
#SBATCH --ntasks=1           #Total number of tasks
#SBATCH -p cpu_small         #Partition (queue) to use
#SBATCH -J stnuseq           #Job name, 8 characters
#SBATCH --time=00:05:00      #Maximum run time
#SBATCH --exclusive          #Exclusive use of the nodes
echo '========================================'
echo '- Job ID:' $SLURM_JOB_ID
echo '- Tasks per node:' $SLURM_NTASKS_PER_NODE
echo '- Number of nodes:' $SLURM_JOB_NUM_NODES
echo '- Total tasks:' $SLURM_NTASKS
echo '- Allocated nodes:' $SLURM_JOB_NODELIST
echo '- directory where sbatch was called ($SLURM_SUBMIT_DIR):'
echo $SLURM_SUBMIT_DIR
cd $SLURM_SUBMIT_DIR
nodeset -e $SLURM_JOB_NODELIST
#Enter the working directory
cd /scratch/xxxx/xxxx/stnc/Numba
#Modules
module load anaconda3/2018.12
#Executable
EXEC='python st-nu-seq.py'
#Launch the run
echo '-- srun -------------------------------'
echo '$ srun --mpi=pmi2 -n' $SLURM_NTASKS $EXEC
srun --mpi=pmi2 -n $SLURM_NTASKS $EXEC
echo '-- END --------------------------------'
```
### Run
```
%%bash
a='st-nu-seq.py'
# diretórios já devem existir
s='/prj/yyyy/xxxx/stnc/Numba'
d='/scratch/yyyy/xxxx/stnc/Numba'
cp $s/$a $d
ls -lh $d/$a
%%bash
sbatch st-nu-seq.srm
! squeue -n stnuseq
! squeue -n stnuseq
%%bash
d='/scratch/yyyy/xxxx/stnc/Numba'
cat $d/slurm-781495.out
%%bash
sbatch st-nu-seq.srm
sbatch st-nu-seq.srm
! squeue -n stnuseq
! squeue -n stnuseq
%%bash
d='/scratch/yyyy/xxxx/stnc/Numba'
cat $d/slurm-788104.out
cat $d/slurm-788105.out
```
|
github_jupyter
|
! python --version
import numba
print(numba.__version__)
import numpy
numpy.show_config()
%%writefile st-nu-seq.py
import numpy as np
from numba import jit, config, prange
from time import time
config.DUMP_ASSEMBLY = 0
config.NUMBA_ENABLE_AVX = 1
config.NUMBA_NUM_THREADS = 1
@jit('(float64[:,:],float64[:,:])', nopython=True, parallel=True, nogil=True)
def kernel_seq(anew, aold) :
anew[1:-1, 1:-1]=1/2.0*(aold[1:-1,1:-1]+1/4.0*(aold[2:,1:-1]+aold[:-2,1:-1]+aold[1:-1,2:]+aold[1:-1,:-2]))
n = 4800 # nxn grid (4800,1,500)=1500; (4800,1,5)=12
energy = 1.0 # energy to be injected per iteration
niters = 500 # number of iterations
nsources = 3 # sources of energy
size = n + 2
sizeEnd = n + 1
heat = np.zeros((1), np.float64) # system total heat
anew = np.zeros((size, size), np.float64)
aold = np.zeros((size, size), np.float64)
sources = np.empty((nsources, 2), np.int16)
sources[:,:] = [ [n//2, n//2], [n//3, n//3], [n*4//5, n*8//9] ]
niters = (niters + 1) // 2
t0 = time()
for iters in range(niters) :
kernel_seq(anew, aold)
for i in range(nsources) :
anew[sources[i, 0], sources[i, 1]] += energy
kernel_seq(aold, anew)
for i in range(nsources) :
aold[sources[i, 0], sources[i, 1]] += energy
heat[0] = np.sum( aold[1:-1, 1:-1] ) # system total heat
t0 = time() - t0
print("Heat = %0.4f | Tempo = %0.4f | Thread count = %s" %
(heat[0], t0, config.NUMBA_NUM_THREADS))
! python st-nu-seq-2.py
%%writefile st-nu-seq.srm
#!/bin/bash
# limites das filas (1,0 UA):
# cpu_dev : 20 min., 1-4 nós, 1/1 tarefas em exec/fila máximo
# cpu_small: 72 horas, 1-20 nós, 16/96 tarefas em exec/fila máximo
#SBATCH --ntasks=1 #Total de tarefas
#SBATCH -p cpu_small #Fila (partition) a ser utilizada
#SBATCH -J stnuseq #Nome do job, 8 caracteres
#SBATCH --time=00:05:00 #Tempo max. de execução
#SBATCH --exclusive #Utilização exclusiva dos nós
echo '========================================'
echo '- Job ID:' $SLURM_JOB_ID
echo '- Tarefas por no:' $SLURM_NTASKS_PER_NODE
echo '- Qtd. de nos:' $SLURM_JOB_NUM_NODES
echo '- Tot. de tarefas:' $SLURM_NTASKS
echo '- Nos alocados:' $SLURM_JOB_NODELIST
echo '- diretorio onde sbatch foi chamado ($SLURM_SUBMIT_DIR):'
echo $SLURM_SUBMIT_DIR
cd $SLURM_SUBMIT_DIR
nodeset -e $SLURM_JOB_NODELIST
#Entra no diretório de trabalho
cd /scratch/xxxx/xxxx/stnc/Numba
#Modulos
module load anaconda3/2018.12
#Executavel
EXEC='python st-nu-seq.py'
#Dispara a execucao
echo '-- srun -------------------------------'
echo '$ srun --mpi=pmi2 -n' $SLURM_NTASKS $EXEC
srun --mpi=pmi2 -n $SLURM_NTASKS $EXEC
echo '-- FIM --------------------------------'
%%bash
a='st-nu-seq.py'
# diretórios já devem existir
s='/prj/yyyy/xxxx/stnc/Numba'
d='/scratch/yyyy/xxxx/stnc/Numba'
cp $s/$a $d
ls -lh $d/$a
%%bash
sbatch st-nu-seq.srm
! squeue -n stnuseq
! squeue -n stnuseq
%%bash
d='/scratch/yyyy/xxxx/stnc/Numba'
cat $d/slurm-781495.out
%%bash
sbatch st-nu-seq.srm
sbatch st-nu-seq.srm
! squeue -n stnuseq
! squeue -n stnuseq
%%bash
d='/scratch/yyyy/xxxx/stnc/Numba'
cat $d/slurm-788104.out
cat $d/slurm-788105.out
| 0.194062 | 0.575588 |
```
import pandas as pd
import numpy as np
files = ['01-15-morgan-merged.csv']
f = open('dlresult.txt', 'a')
f.write('datadl'+'='*30+'\n')
Merged = pd.DataFrame()
origin_total = 0
for filen in files:
try:
data = pd.read_csv(filen, encoding='gb18030')
except Exception as e:
print(filen, ': ', e.__class__.__name__, e)
else:
origin_shape = data.shape
origin_total += origin_shape[0]
col_names = data.columns
print(filen, origin_shape, data.shape)
Merged = pd.concat([Merged, data], axis=0)
print('Shape: ', Merged.shape)
print(col_names)
test_idx = [734, 413, 276, 690, 356, 681, 623, 94, 766, 811, 81, 32, 614,
815, 581, 137, 494, 578, 699, 254]
test_data = Merged.iloc[test_idx, :]
train_data = Merged.iloc[list(set(Merged.index)-set(test_idx)), :]
print(test_data.shape, train_data.shape)
print(test_data.index)
print(train_data.index[:33])
train_data.to_csv('01-15-morgan-train.csv', index=False, encoding='gb18030')
test_data.to_csv('01-15-morgan-test.csv', index=False, encoding='gb18030')
narow = [28]
Merged = Merged.drop(narow, axis=0)
print(Merged.shape)
Merged.to_csv(files[0], encoding='gb18030', index=False)
Merged.index = [x for x in range(Merged.shape[0])]
Merged_last_col = Merged.iloc[:, -1]
print(len(Merged_last_col))
print(Merged_last_col[0])
dellist = []
for i in range(len(Merged_last_col)):
element = Merged_last_col[i]
try:
element = float(element)
except Exception as e:
dellist.append(i)
        print(element, ': ', e.__class__.__name__, e)
else:
Merged.iloc[i, -1] = element
for i in dellist:
Merged = Merged.drop(i, axis=0)
Merged.to_csv('Merged.csv', encoding='gb18030', index=0, header=False)
Merged.to_csv?
col_names = Merged.columns
colNum = np.array([x for x in range(len(col_names))])
indexNum = np.array([x for x in range(Merged.shape[0])])
Merged_last_col = pd.DataFrame(Merged.iloc[:, -1])
sigM = Merged_last_col.applymap(np.isreal)
sigMall = sigM.all(1)
index = indexNum[~sigMall]
delList = []
for i in index:
col = colNum[~sigM.iloc[i]]
for j in col:
s = Merged[col_names[j]].iloc[i]
try:
s = float(s)
except Exception as e:
print(s, ': ', e.__class__.__name__, e)
print('[', i, ', ', col_names[j], ']')
delList.append(i)
break
else:
Merged[col_names[j]].iloc[i] = s
for i in delList:
Merged = Merged.drop(i, axis=0)
print('Merged: ', Merged.shape)
Merged.index = [x for x in range(Merged.shape[0])]
Merged.to_csv('Merged.csv', index=0, encoding='gb18030')
try:
Merged = pd.read_csv('Merged.csv', encoding='gb18030').astype('float')
except Exception as e:
print('Merged: ', e.__class__.__name__, e)
else:
print('Datadl Succeed')
Merged.applymap?
```
**Add one-hot label categories**
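The cell below concatenates a one-hot label frame `labels_hot_pd` that is not constructed in the excerpt shown here. As a hedged illustration only, such a frame could be built with `pd.get_dummies`; the toy `label_series` below is hypothetical and not part of the original data:
```
# Sketch only: build a one-hot (0/1) label frame with pd.get_dummies
# `label_series` is a hypothetical stand-in for the categorical labels being encoded
label_series = pd.Series(["typeA", "typeB", "typeA"], name="label")
labels_hot_pd = pd.get_dummies(label_series, prefix="label")
print(labels_hot_pd)
```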
```
Merged = pd.DataFrame()
origin_total = 0
for filen in files:
try:
data = pd.read_csv(filen, encoding='gb18030')
except Exception as e:
print(filen, ': ', e.__class__.__name__, e)
else:
origin_shape = data.shape
origin_total += origin_shape[0]
col_names = data.columns
if 'temperature' not in filen:
data = data.drop(col_names[0], axis=1)
data = data.drop(col_names[-1], axis=1)
print(filen, origin_shape, data.shape)
Merged = pd.concat([Merged, data], axis=1)
print('Shape: ', Merged.shape)
colnames = Merged.columns
ee_col = Merged[colnames[-1]]
Merged = Merged.drop(colnames[-1], axis=1)
Merged = pd.concat([Merged, labels_hot_pd], axis=1)
Merged = pd.concat([Merged, ee_col], axis=1)
print('Shape: ', Merged.shape)
col_names = Merged.columns
print('Shape after directly dropping rows with invalid values: ', Merged.dropna().shape)
X = Merged.iloc[:,:-1]
y = Merged.iloc[:,-1]
X = X.dropna(axis=1)
# X = X.select_dtypes(['number'])
Merged = pd.concat([X,y], axis=1)
print('Shape: ', Merged.shape)
Merged.index = [x for x in range(Merged.shape[0])]
Merged_origin = Merged
Merged_origin.to_csv('Merged_Origin.csv', index=0, encoding='gb18030')
col_names = Merged.columns
colNum = np.array([x for x in range(len(col_names))])
indexNum = np.array([x for x in range(Merged.shape[0])])
sigM = Merged.applymap(np.isreal)
sigMall = sigM.all(1)
index = indexNum[~sigMall]
delList = []
for i in index:
col = colNum[~sigM.iloc[i]]
for j in col:
s = Merged[col_names[j]].iloc[i]
try:
s = float(s)
except Exception as e:
print(s, ': ', e.__class__.__name__, e)
print('[', i, ', ', col_names[j], ']')
delList.append(i)
break
else:
Merged[col_names[j]].iloc[i] = s
for i in delList:
Merged = Merged.drop(i, axis=0)
print('Merged: ', Merged.shape)
Merged.to_csv('Merged.csv', index=0, encoding='gb18030')
try:
Merged = pd.read_csv('Merged.csv', encoding='gb18030').astype('float')
except Exception as e:
print('Merged: ', e.__class__.__name__, e)
else:
print('Datadl Succeed')
```
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
<i>Licensed under the MIT License.</i>
# SAR Single Node on MovieLens (Python, CPU)
In this example, we will walk through each step of the Simple Algorithm for Recommendation (SAR) using a Python single-node implementation.
SAR is a fast, scalable, adaptive algorithm for personalized recommendations based on user transaction history. It is powered by understanding the similarity between items, and recommending similar items to those a user has an existing affinity for.
## 1 SAR algorithm
The following figure presents a high-level architecture of SAR.
At a very high level, two intermediate matrices are created and used to generate a set of recommendation scores:
- An item similarity matrix $S$ estimates item-item relationships.
- An affinity matrix $A$ estimates user-item relationships.
Recommendation scores are then created by computing the matrix multiplication $A\times S$.
Optional steps (e.g. "time decay" and "remove seen items") are described in detail below.
<img src="https://recodatasets.blob.core.windows.net/images/sar_schema.svg?sanitize=true">
### 1.1 Compute item co-occurrence and item similarity
SAR defines similarity based on item-to-item co-occurrence data. Co-occurrence is defined as the number of times two items appear together for a given user. We can represent the co-occurrence of all items as a $m\times m$ matrix $C$, where $c_{i,j}$ is the number of times item $i$ occurred with item $j$, and $m$ is the total number of items.
The co-occurrence matrix $C$ has the following properties:
- It is symmetric, so $c_{i,j} = c_{j,i}$
- It is nonnegative: $c_{i,j} \geq 0$
- The occurrences are at least as large as the co-occurrences, i.e. the largest element of each row (and column) lies on the main diagonal: $\forall (i,j)\; C_{i,i}, C_{j,j} \geq C_{i,j}$.
Once we have a co-occurrence matrix, an item similarity matrix $S$ can be obtained by rescaling the co-occurrences according to a given metric. Options for the metric include `Jaccard`, `lift`, and `counts` (meaning no rescaling).
If $c_{ii}$ and $c_{jj}$ are the $i$th and $j$th diagonal elements of $C$, the rescaling options are:
- `Jaccard`: $s_{ij}=\frac{c_{ij}}{(c_{ii}+c_{jj}-c_{ij})}$
- `lift`: $s_{ij}=\frac{c_{ij}}{(c_{ii} \times c_{jj})}$
- `counts`: $s_{ij}=c_{ij}$
In general, using `counts` as a similarity metric favours predictability, meaning that the most popular items will be recommended most of the time. `lift` by contrast favours discoverability/serendipity: an item that is less popular overall but highly favoured by a small subset of users is more likely to be recommended. `Jaccard` is a compromise between the two.
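To make the rescaling concrete, here is a minimal NumPy sketch (an illustration on toy data, not the library implementation) that builds the co-occurrence matrix from a binary user-item matrix and rescales it with the `Jaccard` and `lift` options:
```
import numpy as np

# Toy binary user-item matrix: rows are users, columns are items
U = np.array([[1, 1, 0, 0],
              [1, 0, 1, 0],
              [1, 1, 1, 0],
              [0, 0, 1, 1]])

# Co-occurrence matrix: c_ij = number of users who interacted with both items i and j
C = U.T @ U
occurrences = np.diag(C)          # the c_ii values sit on the main diagonal

# Jaccard: s_ij = c_ij / (c_ii + c_jj - c_ij)
S_jaccard = C / (occurrences[:, None] + occurrences[None, :] - C)

# Lift: s_ij = c_ij / (c_ii * c_jj)
S_lift = C / (occurrences[:, None] * occurrences[None, :])

print(np.round(S_jaccard, 2))
```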
### 1.2 Compute user affinity scores
The affinity matrix in SAR captures the strength of the relationship between each individual user and the items that user has already interacted with. SAR incorporates two factors that can impact users' affinities:
- It can consider information about the **type** of user-item interaction through differential weighting of different events (e.g. it may weigh events in which a user rated a particular item more heavily than events in which a user viewed the item).
- It can consider information about **when** a user-item event occurred (e.g. it may discount the value of events that take place in the distant past).
Formalizing these factors produces an expression for user-item affinity:
$$a_{ij}=\sum_k w_k \left(\frac{1}{2}\right)^{\frac{t_0-t_k}{T}} $$
where the affinity $a_{ij}$ for user $i$ and item $j$ is the weighted sum of all $k$ events involving user $i$ and item $j$. Here $w_k$ represents the weight of a particular event, and the $\left(\frac{1}{2}\right)^{\frac{t_0-t_k}{T}}$ term applies the time discount: it makes the parameter $T$ act as a half-life, so events occurring $T$ units before the reference time $t_0$ receive half the weight of events taking place at $t_0$.
Repeating this computation for all $n$ users and $m$ items results in an $n\times m$ matrix $A$. Simplifications of the above expression can be obtained by setting all the weights equal to 1 (effectively ignoring event types), or by setting the half-life parameter $T$ to infinity (ignoring transaction times).
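As a small illustration of the affinity formula (made-up event weights and timestamps, not the library code), the time-decayed affinity for a single user-item pair could be computed as:
```
import numpy as np

# Hypothetical events for one (user, item) pair: (event weight w_k, event time t_k in days)
events = [(1.0, 0.0), (2.0, 15.0), (1.0, 29.0)]

t0 = 30.0  # reference time
T = 30.0   # half-life in days

# a_ij = sum_k w_k * (1/2) ** ((t0 - t_k) / T)
affinity = sum(w * 0.5 ** ((t0 - t_k) / T) for w, t_k in events)
print(affinity)  # events closer to t0 contribute more
```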
### 1.3 Remove seen items
Optionally, we remove items that have already been seen in the training set, i.e. we do not again recommend items the user has previously bought.
### 1.4 Top-k item calculation
The personalized recommendations for a set of users can then be obtained by multiplying the affinity matrix ($A$) by the similarity matrix ($S$). The result is a recommendation score matrix, where each row corresponds to a user, each column corresponds to an item, and each entry corresponds to a user / item pair. Higher scores correspond to more strongly recommended items.
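A minimal sketch of this scoring step on random toy matrices (illustrative only; the actual logic lives in `sar_singlenode.py`):
```
import numpy as np

n_users, n_items, k = 3, 5, 2
rng = np.random.default_rng(0)

A = rng.random((n_users, n_items))            # user-item affinity matrix
S = rng.random((n_items, n_items))            # item-item similarity matrix
seen = rng.random((n_users, n_items)) > 0.5   # items already seen in training

scores = A @ S          # recommendation scores
scores[seen] = -np.inf  # optionally remove seen items

# Indices of the top-k items per user, highest score first
top_k_items = np.argsort(-scores, axis=1)[:, :k]
print(top_k_items)
```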
It is worth noting that the complexity of the recommendation step depends on the data size. The SAR algorithm itself has $O(n^3)$ complexity, so the single-node implementation is not intended to handle large datasets in a scalable manner. When using the algorithm, it is recommended to run it with sufficiently large memory.
## 2 SAR single-node implementation
The SAR implementation illustrated in this notebook was developed in Python, primarily with Python packages like `numpy`, `pandas`, and `scipy`, which are commonly used in data analytics / machine learning tasks. Details of the implementation can be found in [Recommenders/reco_utils/recommender/sar/sar_singlenode.py](../../reco_utils/recommender/sar/sar_singlenode.py).
## 3 SAR single-node based movie recommender
```
# set the environment path to find Recommenders
import sys
sys.path.append("../../")
import itertools
import logging
import os
import numpy as np
import pandas as pd
import papermill as pm
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_stratified_split
from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k
from reco_utils.recommender.sar.sar_singlenode import SARSingleNode
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
# top k items to recommend
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
```
### 3.1 Load Data
SAR is intended to be used on interactions with the following schema:
`<User ID>, <Item ID>, <Time>`.
Each row represents a single interaction between a user and an item. These interactions might be different types of events on an e-commerce website, such as a user clicking to view an item, adding it to a shopping basket, following a recommendation link, and so on.
The MovieLens dataset consists of well-formatted interactions in which users provide ratings to movies (the movie ratings are used as the event weights); we will use it for the rest of the example.
```
data = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=['UserId', 'MovieId', 'Rating', 'Timestamp'],
title_col='Title'
)
# Convert the float precision to 32-bit in order to reduce memory consumption
data.loc[:, 'Rating'] = data['Rating'].astype(np.float32)
data.head()
```
### 3.2 Split the data using the python random splitter provided in utilities:
We split the full dataset into a `train` and `test` dataset to evaluate performance of the algorithm against a held-out set not seen during training. Because SAR generates recommendations based on user preferences, all users that are in the test set must also exist in the training set. For this case, we can use the provided `python_stratified_split` function which holds out a percentage (in this case 25%) of items from each user, but ensures all users are in both `train` and `test` datasets. Other options are available in the `dataset.python_splitters` module which provide more control over how the split occurs.
```
header = {
"col_user": "UserId",
"col_item": "MovieId",
"col_rating": "Rating",
"col_timestamp": "Timestamp",
"col_prediction": "Prediction",
}
train, test = python_stratified_split(data, ratio=0.75, col_user=header["col_user"], col_item=header["col_item"], seed=42)
```
In this case, for illustration purposes, the following parameter values are used:
|Parameter|Value|Description|
|---------|---------|-------------|
|`similarity_type`|`jaccard`|Method used to calculate item similarity.|
|`time_decay_coefficient`|30|Period in days (the $T$ term in the formula in Section 1.2)|
|`time_now`|`None`|Time decay reference.|
|`timedecay_formula`|`True`|Whether time decay formula is used.|
```
# set log level to DEBUG
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s')
model = SARSingleNode(
similarity_type="jaccard",
time_decay_coefficient=30,
time_now=None,
timedecay_formula=True,
**header
)
model.fit(train)
top_k = model.recommend_k_items(test, remove_seen=True)
```
The final output from the `recommend_k_items` method generates recommendation scores for each user-item pair, which are shown as follows.
```
top_k_with_titles = (top_k.join(data[['MovieId', 'Title']].drop_duplicates().set_index('MovieId'),
on='MovieId',
how='inner').sort_values(by=['UserId', 'Prediction'], ascending=False))
display(top_k_with_titles.head(10))
```
### 3.3 Evaluate the results
Note that the recommendation scores generated by multiplying the item similarity matrix $S$ and the user affinity matrix $A$ **DO NOT** have the same scale as the original explicit ratings in the MovieLens dataset. That is to say, the SAR algorithm is meant for the task of *recommending relevant items to users* rather than *predicting explicit ratings for user-item pairs*.
To this end, ranking metrics such as precision@k and recall@k are more appropriate for evaluating the SAR algorithm. The following illustrates how to evaluate the SAR model using the evaluation functions provided in `reco_utils`.
```
# all ranking metrics have the same arguments
args = [test, top_k]
kwargs = dict(col_user='UserId',
col_item='MovieId',
col_rating='Rating',
col_prediction='Prediction',
relevancy_method='top_k',
k=TOP_K)
eval_map = map_at_k(*args, **kwargs)
eval_ndcg = ndcg_at_k(*args, **kwargs)
eval_precision = precision_at_k(*args, **kwargs)
eval_recall = recall_at_k(*args, **kwargs)
print(f"Model:",
f"Top K:\t\t {TOP_K}",
f"MAP:\t\t {eval_map:f}",
f"NDCG:\t\t {eval_ndcg:f}",
f"Precision@K:\t {eval_precision:f}",
f"Recall@K:\t {eval_recall:f}", sep='\n')
```
## References
Note that SAR is a combinational algorithm that implements several industry heuristics. The following references may be helpful in understanding the SAR logic and implementation.
1. Badrul Sarwar, *et al*, "Item-based collaborative filtering recommendation algorithms", WWW, 2001.
2. Scipy (sparse matrix), url: https://docs.scipy.org/doc/scipy/reference/sparse.html
3. Asela Gunawardana and Guy Shani, "A survey of accuracy evaluation metrics of recommendation tasks", The Journal of Machine Learning Research, vol. 10, pp 2935-2962, 2009.
Saturation curves for SM-omics and ST<br>
Input files are generated by counting the number of unique molecules and the number of annotated reads per annotated region, after adjusting for sequencing depth, in downsampled fastq files (proportions 0.001, 0.01, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1) processed using the ST-pipeline.<br>
```
%matplotlib inline
import os
import numpy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import glob
import warnings
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
warnings.filterwarnings('ignore')
def condition(row):
""" Takes row in pandas df as input and returns type of condition
"""
# The samples are run in triplicate based on condition
condition = ['sm-omics', 'ST']
if row['Name'] in ['10015CN84_D2', '10015CN84_C2', '10015CN60_E2']:
return condition[0]
elif row['Name'] in ['10005CN48_C1','10005CN48_D1','10005CN48_E1']:
return condition[1]
# Load input files
path = '../../smomics_data'
stats_list = []
samples_list = ['10005CN48_C1', '10005CN48_D1', '10005CN48_E1', '10015CN84_D2', '10015CN84_C2', '10015CN60_E2']
spots_under_tissue = {'10005CN48_C1':258,
'10005CN48_D1':252,
'10005CN48_E1':203,
'10015CN84_D2': 201,
'10015CN84_C2': 241,
'10015CN60_E2':235}
prop_list = [0.001, 0.01, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1]
for filename in samples_list:
cond_file = pd.read_csv(os.path.join(path, filename + '_umi_after_seq_depth_in_spots_under_outside_tissue.txt'), sep = '\t')
if numpy.unique(cond_file['Name']) == '10005CN60_E2':
cond_file['Name'] = '10015CN60_E2'
print(cond_file)
cond_file.sort_values(by='Num reads', inplace=True)
cond_file['Prop_annot_reads'] = prop_list
cond_file['Condition'] = cond_file.apply(lambda row: condition(row), axis = 1)
cond_file['norm uniq mol inside'] = cond_file['UMI inside']
cond_file['norm uniq mol outside'] = cond_file['UMI outside']
stats_list.append(cond_file)
# Concat all files
cond_merge = pd.concat(stats_list)
#Plot
fig = plt.figure(figsize=(20, 10))
x="Prop_annot_reads"
y="norm uniq mol inside"
#y="Genes"
hue='Condition'
################ LINE PLOT
ax = sns.lineplot(x=x, y=y, data=cond_merge,hue=hue,
palette =['cadetblue', 'lightcoral'], hue_order = ['sm-omics', 'ST'],ci=95)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('k')
ax.spines['left'].set_color('k')
# X and y label size
ax.set_xlabel("Proportion annotated reads", fontsize=15)
ax.set_ylabel("Number of unique molecules under tissue", fontsize=15)
# Set ticks size
ax.tick_params(axis='y', labelsize=15)
ax.tick_params(axis='x', labelsize=15)
# change background color
back_c = 'white'
ax.set_facecolor(back_c)
ax.grid(False)
# Thousand separator on y axis
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
# LEGEND
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[0:], labels=['sm-omics', 'ST'],loc='upper left', ncol=2, fontsize=20)
fig.set_size_inches(20, 10)
# plt.savefig("../../figures/saturation_sm_st_total_umis_inside.pdf", transparent=True, bbox_inches = 'tight',
# pad_inches = 0, dpi=1200)
plt.show()
cond_file['Prop_annot_reads'] = 100*cond_file['Prop_annot_reads']
#cond_merge.to_csv('../../smomics_data/sm_st_unique_molecules_under_outside_tissue.csv')
cond_merge
```
# Introduction
In a prior notebook, documents were partitioned by assigning them to the domain with the highest Dice similarity of their term and structure occurrences. Here, we'll assess whether the observed modularity of the partitions is greater than expected by chance. Modularity will be measured by the ratio of dispersion between partitions to dispersion within partitions.
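Concretely, the observed statistic computed below for a domain $k$ averages, over the articles $d$ assigned to that domain, the ratio of each article's mean Dice distance to articles outside the domain to its mean distance to articles inside the domain:

$$\mathrm{mod}_k = \frac{1}{|D_k|}\sum_{d \in D_k} \frac{\overline{\mathrm{dist}}_{\mathrm{ext}}(d)}{\overline{\mathrm{dist}}_{\mathrm{int}}(d)}$$

Values above 1 indicate that articles lie closer to their own partition than to the rest of the corpus.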
# Load the data
```
import pandas as pd
import numpy as np
np.random.seed(42)
import sys
sys.path.append("..")
import utilities
from style import style
framework = "data-driven"
version = 190325 # Document-term matrix version
suffix = "" # Suffix for term lists
clf = "_lr" # Suffix for data-driven classifiers
n_iter = 1000 # Iterations for bootstrap and null
dx = [0.34, 0.355, 0.38, 0.32, 0.34, 0.34] # Nudges for plotted means
alpha = 0.001 # Significance level for statistical comparisons
```
## Brain activation coordinates
```
act_bin = utilities.load_coordinates()
print("Document N={}, Structure N={}".format(
act_bin.shape[0], act_bin.shape[1]))
```
## Document-term matrix
```
dtm_bin = utilities.load_doc_term_matrix(version=version, binarize=True)
print("Document N={}, Term N={}".format(
dtm_bin.shape[0], dtm_bin.shape[1]))
```
## Domain archetypes
```
from collections import OrderedDict
lists, circuits = utilities.load_framework(framework, suffix=suffix, clf=clf)
words = sorted(list(set(lists["TOKEN"])))
structures = sorted(list(set(act_bin.columns)))
domains = list(OrderedDict.fromkeys(lists["DOMAIN"]))
archetypes = pd.DataFrame(0.0, index=words+structures, columns=domains)
for dom in domains:
for word in lists.loc[lists["DOMAIN"] == dom, "TOKEN"]:
archetypes.loc[word, dom] = 1.0
for struct in structures:
archetypes.loc[struct, dom] = circuits.loc[struct, dom]
archetypes[archetypes > 0.0] = 1.0
print("Term & Structure N={}, Domain N={}".format(
archetypes.shape[0], archetypes.shape[1]))
```
## Document structure-term vectors
```
pmids = dtm_bin.index.intersection(act_bin.index)
len(pmids)
dtm_words = dtm_bin.loc[pmids, words]
act_structs = act_bin.loc[pmids, structures]
docs = dtm_words.copy()
docs[structures] = act_structs.copy()
docs.head()
```
## Document splits
```
splits = {}
splits["discovery"] = [int(pmid.strip()) for pmid in open("../data/splits/train.txt")]
splits["replication"] = [int(pmid.strip()) for pmid in open("../data/splits/validation.txt")]
splits["replication"] += [int(pmid.strip()) for pmid in open("../data/splits/test.txt")]
for split, pmids in splits.items():
print("{:12s} N={}".format(split.title(), len(pmids)))
```
## Document assignments
```
doc2dom_df = pd.read_csv("../partition/data/doc2dom_{}{}.csv".format(framework, clf),
header=None, index_col=0)
doc2dom = {int(pmid): str(dom.values[0]) for pmid, dom in doc2dom_df.iterrows()}
dom2docs = {dom: {split: [] for split in ["discovery", "replication"]} for dom in domains}
for doc, dom in doc2dom.items():
for split, split_pmids in splits.items():
if doc in splits[split]:
dom2docs[dom][split].append(doc)
sorted_pmids = {}
for split, split_pmids in splits.items():
sorted_pmids[split] = []
for dom in domains:
sorted_pmids[split] += [pmid for pmid, sys in doc2dom.items() if sys == dom and pmid in split_pmids]
sorted_pmids["discovery"][:5]
```
# Compute document distances
Indexing by min:max will be faster in subsequent computations
```
from scipy.spatial.distance import cdist
doc_dists = {}
for split in splits.keys():
ids = sorted_pmids[split]
doc_dists[split] = cdist(docs.loc[ids], docs.loc[ids], metric="dice")
doc_dists[split] = pd.DataFrame(doc_dists[split], index=ids, columns=ids)
```
## Compute domain min and max indices
```
dom_idx = {}
for split in splits.keys():
dom_idx[split] = {dom: {"min": 0, "max": 0} for dom in domains}
for dom in domains:
dom_pmids = dom2docs[dom][split]
dom_idx[split][dom]["min"] = sorted_pmids[split].index(dom_pmids[0])
dom_idx[split][dom]["max"] = sorted_pmids[split].index(dom_pmids[-1]) + 1
```
# Compute domain modularity
## Observed values
### Distances internal and external to articles in each domain
```
dists_int, dists_ext = {}, {}
for split, split_pmids in splits.items():
dists_int[split], dists_ext[split] = {}, {}
for dom in domains:
dom_min, dom_max = dom_idx[split][dom]["min"], dom_idx[split][dom]["max"]
dom_dists = doc_dists[split].values[:,dom_min:dom_max][dom_min:dom_max,:]
dists_int[split][dom] = dom_dists
other_dists_lower = doc_dists[split].values[:,dom_min:dom_max][:dom_min,:]
other_dists_upper = doc_dists[split].values[:,dom_min:dom_max][dom_max:,:]
other_dists = np.concatenate((other_dists_lower, other_dists_upper))
dists_ext[split][dom] = other_dists
```
### Article-level ratio of external to internal distances
```
df_stat = {}
pmid_list, split_list, dom_list, obs_list = [], [], [], []
for split, split_pmids in splits.items():
df_stat[split] = pd.DataFrame(index=domains, columns=["OBSERVED"])
for dom in domains:
mean_dist_int = np.mean(dists_int[split][dom], axis=0)
mean_dist_ext = np.mean(dists_ext[split][dom], axis=0)
df_stat[split].loc[dom, "OBSERVED"] = np.nanmean(mean_dist_ext / mean_dist_int)
obs = mean_dist_ext / mean_dist_int
pmid_list += dom2docs[dom][split]
dom_list += [dom] * len(obs)
split_list += [split] * len(obs)
obs_list += list(obs)
df_obs = pd.DataFrame({"PMID": pmid_list, "SPLIT": split_list,
"DOMAIN": dom_list, "OBSERVED": obs_list})
df_obs.to_csv("data/mod_obs_{}{}.csv".format(framework, clf))
df_obs.head()
for split in splits.keys():
obs = df_stat[split]["OBSERVED"].mean()
print("{:28s} {:6.4f}".format(split.title() + " set modularity", obs))
```
## Null distributions
```
import os
df_null = {}
for split, split_pmids in splits.items():
print("Processing {} split (N={} documents)".format(split, len(split_pmids)))
n_docs = len(split_pmids)
file_null = "data/mod_null_{}{}_{}_{}iter.csv".format(framework, clf, split, n_iter)
if not os.path.isfile(file_null):
df_null[split] = np.empty((len(domains), n_iter))
for i, dom in enumerate(domains):
print("----- Processing {}".format(dom))
n_dom_docs = dists_int[split][dom].shape[0]
dist_int_ext = np.concatenate((dists_int[split][dom], dists_ext[split][dom]))
for n in range(n_iter):
null = np.random.choice(range(n_docs), size=n_docs, replace=False)
dist_int_ext_null = dist_int_ext[null,:]
mean_dist_int = np.mean(dist_int_ext_null[:n_dom_docs,:], axis=0)
mean_dist_ext = np.mean(dist_int_ext_null[n_dom_docs:,:], axis=0)
df_null[split][i,n] = np.nanmean(mean_dist_ext / mean_dist_int)
df_null[split] = pd.DataFrame(df_null[split], index=domains, columns=range(n_iter))
df_null[split].to_csv(file_null)
print("")
else:
df_null[split] = pd.read_csv(file_null, index_col=0, header=0)
```
### Interleave splits to facilitate plotting
```
df_null_interleaved = pd.DataFrame()
null_idx = []
for dom in domains:
for split in ["discovery", "replication"]:
df_null_interleaved = df_null_interleaved.append(df_null[split].loc[dom])
null_idx.append(dom + "_" + split)
df_null_interleaved.index = null_idx
df_null_interleaved.head()
```
## Bootstrap distributions
```
df_boot = {}
for split, split_pmids in splits.items():
print("Processing {} split (N={} documents)".format(split, len(split_pmids)))
file_boot = "data/mod_boot_{}{}_{}_{}iter.csv".format(framework, clf, split, n_iter)
if not os.path.isfile(file_boot):
df_boot[split] = np.empty((len(domains), n_iter))
for i, dom in enumerate(domains):
print("----- Processing {}".format(dom))
n_docs = dists_int[split][dom].shape[0]
for n in range(n_iter):
boot = np.random.choice(range(n_docs), size=n_docs, replace=True)
mean_dist_int = np.mean(dists_int[split][dom][:,boot], axis=0)
mean_dist_ext = np.mean(dists_ext[split][dom][:,boot], axis=0)
df_boot[split][i,n] = np.nanmean(mean_dist_ext / mean_dist_int)
df_boot[split] = pd.DataFrame(df_boot[split], index=domains, columns=range(n_iter))
df_boot[split].to_csv(file_boot)
print("")
else:
df_boot[split] = pd.read_csv(file_boot, index_col=0, header=0)
```
# Perform significance testing
```
from statsmodels.stats import multitest
for split in splits.keys():
pval = []
for dom in domains:
dom_null = df_null[split].loc[dom].values
dom_obs = float(df_stat[split].loc[dom, "OBSERVED"])
p = np.sum(dom_null >= dom_obs) / float(n_iter)
pval.append(p)
df_stat[split].loc[dom, "P"] = p
df_stat[split]["FDR"] = multitest.multipletests(pval, method="fdr_bh")[1]
for dom in domains:
if df_stat[split].loc[dom, "FDR"] < alpha:
df_stat[split].loc[dom, "STARS"] = "*"
else:
df_stat[split].loc[dom, "STARS"] = ""
df_stat[split] = df_stat[split].loc[domains, ["OBSERVED", "P", "FDR", "STARS"]]
df_stat[split].to_csv("data/mod_mean_{}{}_{}.csv".format(framework, clf, split))
print("-" * 65 + "\n" + split.upper() + "\n" + "-" * 65)
print(df_stat[split])
print("")
```
# Plot results
```
%matplotlib inline
utilities.plot_split_violins(framework, domains, df_obs, df_null_interleaved, df_stat,
style.palettes[framework], metric="mod", dx=dx,
ylim=[0.75,1.75], yticks=[0.75,1,1.25,1.5,1.75],
interval=0.999, alphas=[0], suffix=clf)
```
# Introduction to the material
To increase the skill level of the team at [CEAi](http://ceai.io), we aim to organize "precision workshops", or workshops set up to maximize useful output for the group. The main idea behind precision workshops is solid preparation such that all participants of the workshop are at approximately the same level and that level is already somewhat beyond introductory. Such preparation should result in a workshop that is useful for all members, yet addresses advanced topics instead of the basics. The following material is intended for machine learners that want to get acquainted with MCMC and has a strong coding/product focus. This material together with the Precision workshop itself should build enough knowledge and confidence in people to use the following techniques in shipped products.
This material is built (on top of other material, see credits) to facilitate preparation for our first precision workshop on Bayesian modelling in May 2018.
The shape and form of the content were also inspired by the following quotes:
> "Don't ever stop programming." - Geoff Hinton, in [interview with Andrew Ng](https://www.youtube.com/watch?v=-eyhCTvrEtE)
> "What people appreciated most about CS231n was that they would program everything from scratch, no libraries." - Andrej Karpathy, in [interview with Andrew Ng](https://www.youtube.com/watch?v=_au3yw46lcg)
## Objectives
This material should get the reader acquainted with the following topics and our open-ended exercises serve to build a solid grasp of each topic.
- Basic proficiency in Bayesian modelling
- Refresher: commonly used probability distributions
- Construction of models from standard building blocks
- Understanding the PyMC3 library
- Clarity on various steps needed to write a model
- Understanding inference via MCMC
- The Monte Carlo approach
- Basic sampling algorithms
- Markov Chain Monte Carlo as a method of estimating the posterior
- Understanding the Metropolis sampling algorithm in detail
- Basic idea behind more advanced samplers: HMC, NUTS
- Understand the idea of variational inference
- What is variational inference?
- The Evidence Lower Bound (ELBO)
- Mean field variational inference + example
At the end of the preparation the reader should be able to write and apply a simple Bayesian model from scratch using classical (albeit inefficient) methods including an MCMC sampler and also write such models in PyMC3. This will facilitate a deeper appreciation and understanding of the underlying problems.
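For a flavor of what "from scratch" means here, a minimal random-walk Metropolis sampler for a one-dimensional target might look like the sketch below (illustrative only; the exercises build this up properly, and PyMC3 provides far better samplers):
```
import numpy as np

def metropolis(log_post, x0, n_samples=5000, step=0.5, seed=0):
    """Random-walk Metropolis sampler for a 1-D target given by its log-density."""
    rng = np.random.default_rng(seed)
    samples = np.empty(n_samples)
    x = x0
    for i in range(n_samples):
        proposal = x + step * rng.normal()
        # Accept with probability min(1, p(proposal) / p(x))
        if np.log(rng.uniform()) < log_post(proposal) - log_post(x):
            x = proposal
        samples[i] = x
    return samples

# Example: sample from a standard normal via its unnormalized log-density
draws = metropolis(lambda x: -0.5 * x**2, x0=0.0)
print(draws.mean(), draws.std())
```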
As a side goal, we present some interesting models and provide a taste of some recent cool theoretical advances.
Also, last but not least: have fun!
## Precision workshop 1 target content
From there, the Precision workshop will further build upon this basis with key topics like:
- Building great models - heuristics, criticism, experiences
- Advanced MCMC, diagnostics, improvements
- Critiquing models of immediate interest in current startups at CEAi, consulting
- Model checking - answer to the question: is my model ready for the real world
## Credits
The original material on which these notebooks expand is the excellent [tutorial](https://github.com/fonnesbeck/PyMC3_DataScienceLA) by [Chris Fonnesbeck](https://twitter.com/fonnesbeck?lang=en) that was published under CCL 1.0.
We have tried to tweak the content and add new content to maximize explanatory power and reduce friction when learning but beware - Bayesian modelling is not an easy topic. This material is made available under the same license as the original tutorial by Chris Fonnesbeck (CCL 1.0).
Other sources include:
- Examples from [Getting started in PyMC3](http://docs.pymc.io/notebooks/getting_started.html) (Apache License 2.0)
- Various talks by David MacKay, Ian Murray and many others on the problem of inference
#### This notebook was designed to utilized as a short presentation with optional code view. In github, it renders as a static page.
#### Link to dataset utilized: https://www.consumerfinance.gov/data-research/consumer-complaints/
```
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<button style="Height:50px;font-size:24px" onclick="code_toggle()">View Code</button>''')
```
# How can we build complaint analytics to help determine what matters to our customers?
```
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
import re
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
def dateLookup(s):
"""
Memoization solution for faster date_time parsing
source: https://stackoverflow.com/questions/29882573/pandas-slow-date-conversion
"""
dates = {date:pd.to_datetime(date) for date in s.unique()}
return s.map(dates)
def remove_redact(text):
return re.sub('XX/?','',text)
def bow_vectorize(text_series,max_df):
cv = CountVectorizer(stop_words='english',min_df=0.01,max_df=max_df,ngram_range=(1,6), max_features = 15000)
m = cv.fit_transform(text_series)
return {'cv':cv,'m':m}
def bow_conform(text_series,feature_names):
cv = CountVectorizer(stop_words='english',ngram_range=(1,6), max_features = 15000, vocabulary = feature_names)
m = cv.fit_transform(text_series)
return {'cv':cv,'m':m}
def tfidf_vectorize(bow_m,sublinear_tf):
tt = TfidfTransformer(norm='l2',smooth_idf=True,use_idf=True,sublinear_tf=sublinear_tf)
m = tt.fit_transform(bow_m)
return {'tt':tt,'m':m}
c = pd.read_csv('../data/Consumer_Complaints.csv')
#Take subset where complaint text is available
c = c[c['Consumer consent provided?'] == 'Consent provided']
#Convert yes/no strings to bool
for col in ['Consumer disputed?','Timely response?']:
c[col] = (c[col] == 'Yes')
#Convert datetimes
for col in ['Date received', 'Date sent to company']:
c[col] = dateLookup(c[col])
#All complaints in our subset are submitted via web and have consent provided.
c = c.drop(['Consumer consent provided?','Submitted via'],axis='columns')
#Convert all others, convert yes/no bools to int64
dataTypes = {
'Product':'category',
'Sub-product':'category',
'Issue':'category',
'Sub-issue':'category',
'Consumer complaint narrative':str,
'Company public response':str,
'Company':'category',
'State':'category',
'ZIP code':str,
'Tags':str,
'Company response to consumer':'category',
'Timely response?':'int64',
'Consumer disputed?':'int64',
'Complaint ID':'int64'
}
c = c.astype(dataTypes)
columnNames = {
'Date received':'date_received',
'Product':'product',
'Sub-product':'sub_product',
'Issue':'issue',
'Sub-issue':'sub_issue',
'Consumer complaint narrative':'text',
'Company public response':'pub_reply',
'Company':'company',
'State':'state',
'ZIP code':'zip_code',
'Tags':'tags',
'Date sent to company':'date_sent',
'Company response to consumer':'cust_reply',
'Timely response?':'timely_reply',
'Consumer disputed?':'disputed',
'Complaint ID':'ID'
}
c = c.rename(columns = columnNames)
b = c[c['product'] == 'Bank account or service'].copy()
#There are a few missing state entries, and not every complaint has a sub-product or sub-issue.
#Cleaning Up for Word Vectorization.
#Remove XXXXs
#Are numbers significant/helpful? Is there another format that might make them moreso?
#Strings of XXXX are used for redaction. These will need to be removed.
b.loc[:,'clean_text'] = b['text'].apply(remove_redact)
issueAliases = {
'Account opening, closing, or management':'Account',
'Deposits and withdrawals':'Transactions',
'Making/receiving payments, sending money':'Payments',
'Problems caused by my funds being low':'Low Funds',
'Using a debit or ATM card':'Card'
}
b['issue'] = b['issue'].map(issueAliases).copy()
issues = sorted(list(issueAliases.values()))
b = b.reset_index(drop=True)
b.index.name = 'Index'
b.to_csv('../Cleaned_Consumer_Complaints.csv')
```
## We'd like to have analytics on customer feedback.
Customers are constantly providing feedback that could be of great value in prioritizing which issues to address first. We can overcome the bias in anecdotal summaries of this feedback by seeing how much each issue is actually written about and how each issue has trended over time. Here's an example of what those displays could tell us, based on complaint data about Banking Services from the Consumer Financial Protection Bureau:
### A simple bar chart tells us what people are talking about most
```
p_issue_class = b.groupby(['product','issue'])['text'].count()
p_issue_class['Bank account or service'].plot(kind='barh',title = 'Complaints Per Issue, 04-2015 to 04-2017',color='black',figsize=(6,6))
plt.xlabel('# Of Complaints')
plt.ylabel('Issue')
plt.show()
```
### Alternatively, the way these issues have trended tells us whether each issue's importance is rising, stable, or declining.
```
a = b
a.index = b['date_received']
issue_counts = {issue:a[a['issue'] == issue]['issue'].resample('W').count() for issue in issues}
for issue, series in issue_counts.items():
plt.plot(series,color='black')
plt.title(issue + ', Weekly Complaints')
plt.xticks(rotation=70)
plt.ylim(0,120)
plt.ylabel('Total Weekly Complaints')
plt.xlabel('Date')
plt.show()
```
## What is the fastest and most reliable way for us to classify the issues in our own complaint data?
Labeling every ticket by hand costs human labor. Can an algorithm learn to label the issue addressed in each message, based on a handful of human-labeled examples?
Since a human could definitely infer the issue addressed in each message by reading it, we can intuit that the text contains enough information to accurately predict the issue. This will depend largely on our ability to create **separable categories**, which we can tell apart based on **quantifiable features** derived from the text. We'll then need to find a classification algorithm that latches onto those distinctions without being overzealous.
We'll reduce the information contained in the language of each ticket to a matrix with features, labels and observations. The features will be quantifiable aspects of that message, the labels will be the human-labeled issue, and each message will be an observation.
The most prevalent technique for extracting features from text is a "Bag of Words" matrix that shows how many times each phrase occurred in a document. However, the phrases we're especially interested in are those that are very commonly stated about a specific topic and very rarely stated about other topics.
The presence of such phrases should be predictive of the issue, especially when they co-occur. Conversely, we'll have plenty of generic phrases which are common across tickets or appear too rarely to be helpful.
Let's find out what the distribution of specific vs non-specific terms looks like. To do this we'll use a metric formally called TFIDF, designed to measure term specificity. TFIDF balances how often a term occurs in a particular issue against how often it occurs in general. To help us remember what we're talking about, we'll just refer to it as term specificity.
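As a toy illustration of the idea (made-up sentences, using scikit-learn's `TfidfVectorizer` rather than the two-step CountVectorizer/TfidfTransformer pipeline used elsewhere in this notebook):
```
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

# Three made-up "issue globs"; 'account' is generic, most other nouns are topic-specific
docs = ["overdraft fee charged on my account",
        "debit card declined at the atm and account locked",
        "wire transfer payment never arrived in the account"]

tfidf = TfidfVectorizer(stop_words='english')
m = tfidf.fit_transform(docs)

# Use get_feature_names() instead on older scikit-learn versions (as elsewhere in this notebook)
terms = tfidf.get_feature_names_out()
scores = pd.DataFrame(m.toarray(), columns=terms, index=['Low Funds', 'Card', 'Payments'])
print(scores.round(2).T)  # 'account' scores low everywhere; topic words score high in one row
```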
### Specific terms in the text content for each issue are far outnumbered by generic terms.
```
# We'll take a representive sampling of each issue's text.
np.random.seed(0)
issue_sample_size = 500;
issue_text_samples = {i:b[b['issue'] == i].sample(issue_sample_size)['clean_text'].reset_index(drop=True) for i in issues}
text_globs = pd.Series({i:' '.join(issue_text_samples[i]) for i in issues},index = issues)
glob_bow = bow_vectorize(text_globs,max_df=1.0)
#Computes sublinear tf, in which term frequency is replaced with 1 + log(tf), functions better for Doc Count.
glob_tfidf = tfidf_vectorize(glob_bow['m'],sublinear_tf=False)
bow_m = glob_bow['m'].todense()
tfidf_m = glob_tfidf['m'].todense()
feature_names = glob_bow['cv'].get_feature_names()
metrics = ['bow','tfidf']
comp_m = np.concatenate([row for i in range(bow_m.shape[0]) for row in
[bow_m[i,:],tfidf_m[i,:]]
])
comp_index = pd.MultiIndex.from_product([issues,metrics], names=['Issue', 'Metric'])
feature_index = pd.Index(feature_names,name='Features')
glob_df = pd.DataFrame(comp_m.T,index=feature_index, columns=comp_index)
plt.figure(figsize=(10,6))
plt.title("Distributions of Specificity Values for Each Issue")
for issue in issues:
sns.kdeplot(glob_df.loc[:,(issue,'tfidf')],shade=True,label=issue)
plt.ylabel('Frequency')
plt.xlabel('Specificity Values')
plt.show()
```
### Let's take a look at a few of the most and least specific terms for each issue.
```
np.random.seed(47)
for issue in issues:
print()
print('\033[1m' + issue + ' Issues' + '\033[0m','\n')
it = glob_df.loc[:,(issue,['tfidf','bow'])]
it.columns = it.columns.get_level_values(1)
it.columns = ["Frequency","Specificity"]
unigrams = it.loc[[i for i in glob_df.index if len(i.split(' ')) == 1]].sort_values(by='Specificity',ascending=False)
bigrams = it.loc[[i for i in glob_df.index if len(i.split(' ')) == 2]].sort_values(by='Specificity',ascending=False)
it = it.sort_values(by=['Specificity'],ascending=False)
print('One Word Phrases')
print(unigrams.head(15))
print()
print('Two Word Phrases')
print(bigrams.head(10))
print()
```
### We'll need a good strategy for handling numeric values
The above examples illustrate that numeric values play a peculiar role in our extracted terms. If we transform these into ranges that capture the way they are being used, these features might retain their specificity whilst becoming more prevalent within the issue as a whole. For example, 34 and 35 dollar fees are popular terms in 'Low Funds' that can be aggregated.
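One hedged sketch of what such a transformation could look like; the bucket boundaries, token names, and example sentence below are illustrative assumptions, not values taken from this project:
```python
import re

def bucket_dollar_amounts(text):
    """Replace raw dollar figures with coarse range tokens (illustrative buckets)."""
    def repl(match):
        amount = float(match.group(1))
        if amount < 50:
            return 'amt_under_50'
        if amount < 500:
            return 'amt_50_to_500'
        return 'amt_over_500'
    # Matches '$3000', '35.00', '35 dollars', etc. A real version would want to
    # avoid swallowing dates, account numbers, and other non-monetary digits.
    return re.sub(r'\$?(\d+(?:\.\d{1,2})?)(?:\s*dollars?)?', repl, text)

bucket_dollar_amounts("charged a 35.00 fee and placed a $3000 hold")
# -> 'charged a amt_under_50 fee and placed a amt_over_500 hold'
```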
### Depending on the problem framing, we may want to remove certain proper nouns.
Names of banks in the CDFI data set that this project utilizes may not be realistic terms to consider, depending on the project's final framing of client + issue.
### How would a classifier do with predicting the topic based on these collections of terms?
A multinomial naive Bayes classifier, based on a straightforward application of Bayes' theorem, was trained to predict the issue from the term specificity scores of the words in each document.
The heatmap matrix below illustrates the classifier's ability to distinguish between various classes.
```
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import train_test_split
example_set = b.reset_index(drop=True)[['clean_text','issue']]
#assemble training examples and labels for unbalanced set
#Make the vocabulary correspond to the one learned from the topic globs.
examples_bow = bow_conform(example_set['clean_text'],feature_names)
examples_tfidf = tfidf_vectorize(examples_bow['m'],sublinear_tf=True)
labels = example_set['issue']
#Take a balanced sampling for an alternative training set and an unbalanced/stratified test set
np.random.seed(42)
class_balance = np.array([len(example_set[example_set['issue'] == issue]) for issue in issues])
es_copy = example_set.copy()
training_samples = []
test_samples = []
smallest_class_total = np.min(class_balance)
test_frac = 0.33
training_size = int(np.floor(smallest_class_total*(1-test_frac)))
min_test_size = smallest_class_total-training_size
test_sample_sizes = zip(issues,np.floor(class_balance*(min_test_size/smallest_class_total)))
for issue in issues:
training_samples.append(es_copy[es_copy['issue'] == issue].sample(training_size,replace=False))
bal_training_set = pd.concat(training_samples)
es_copy = es_copy.drop(bal_training_set.index)
overlap_test=set(es_copy.index).intersection(set(bal_training_set.index))
for issue,test_size in test_sample_sizes:
test_samples.append(es_copy[es_copy['issue'] == issue].sample(int(test_size),replace=False))
strat_test_set = pd.concat(test_samples)
#assemble training examples and labels for balanced set
#Make the vocabulary correspond to the one learned from the topic globs.
bal_training_bow = bow_conform(bal_training_set['clean_text'],feature_names)
bal_training_tfidf = tfidf_vectorize(bal_training_bow['m'],sublinear_tf=True)
bal_training_labels = bal_training_set['issue']
strat_test_bow = bow_conform(strat_test_set['clean_text'],feature_names)
strat_test_tfidf = tfidf_vectorize(strat_test_bow['m'],sublinear_tf=True)
strat_test_labels = strat_test_set['issue']
bal_X_train,bal_y_train = bal_training_tfidf['m'], bal_training_labels
strat_X_test, strat_y_test = strat_test_tfidf['m'], strat_test_labels
#train a multinomial bayes classifier for both sets
bal_mnb_classifier = MultinomialNB()
bal_mnb_classifier.fit(X=bal_X_train,y=bal_y_train)
print()
predictions = bal_mnb_classifier.predict(X=strat_X_test)
confusion = np.matrix(metrics.confusion_matrix(strat_y_test,predictions))
rel_confusion = confusion/confusion.sum(axis=1)
rc_df = pd.DataFrame(rel_confusion,index=issues,columns=issues)
heat = sns.heatmap(rc_df,cmap='magma',annot=True)
heat.set(xlabel = 'Predicted Issue',ylabel = 'Actual Issue',title= 'Predicted vs Actual Issue')
plt.show()
print(metrics.classification_report(strat_y_test,predictions))
print('Num of training examples :',bal_mnb_classifier.class_count_)
```
### Final Thoughts
As you can see above, some issues are easier to predict than others, and certain pairs seem to blend together. One possible explanation for this is that specific language is not consistently used. With our further feature engineering efforts, we'll try to find generalizable signifiers that are both widely distributed and unique to the issue.
Our model of term specificity presents an interesting issue in practice, since we're classifying a single new document rather than a new large corpus of documents. In order to take advantage of context, we will use the Inverse Document Frequency metric from our training data to weight the Term Frequency of the test data. Normally the Term Frequency and Inverse Document Frequency would be computed on the same corpus. This should weight the features in such a way that topic-specific phrases are emphasized.
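A minimal sketch of that weighting scheme, using the count matrices built earlier with `bow_conform` (note that the `tfidf_vectorize` helper above fits a fresh transformer on each set, so this is a sketch of the intended approach rather than the exact code used):
```python
from sklearn.feature_extraction.text import TfidfTransformer

# Learn IDF weights from the training counts only.
tt = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True, sublinear_tf=True)
train_tfidf = tt.fit_transform(bal_training_bow['m'])

# Reuse those training IDF weights to weight the test-set term frequencies.
test_tfidf = tt.transform(strat_test_bow['m'])
```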
**Based on the above, we can see that a more developed classifier may be able to parse these categories in a majority of cases. However, if specific terms aren't prevalent enough in a message, our algorithm could ask for human help.** This means that a reliable metric of confidence will play an important role in getting this algorithm to avoid the mis-classifications evident in the heatmap above.
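One simple way to operationalize that hand-off is a probability cutoff; the 0.8 threshold below is an arbitrary placeholder, and `MultinomialNB` probabilities are known to be poorly calibrated, so this is only a sketch:
```python
import numpy as np

# Posterior probabilities from the trained classifier on the stratified test set.
probs = bal_mnb_classifier.predict_proba(strat_X_test)
confidence = probs.max(axis=1)

# Route low-confidence predictions to a human instead of auto-labeling them.
needs_human = confidence < 0.8
print("Fraction of tickets flagged for human review:", needs_human.mean())
```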
We will also want to explore unsupervised 'topic modeling' methods to help us deal with an initial stack of unsorted tickets. In this phase, we could verify that the categories we use for classification are separable to an algorithm as well as meaningful to a person.
|
github_jupyter
|
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<button style="Height:50px;font-size:24px" onclick="code_toggle()">View Code</button>''')
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
import re
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
def dateLookup(s):
"""
Memoization solution for faster date_time parsing
source: https://stackoverflow.com/questions/29882573/pandas-slow-date-conversion
"""
dates = {date:pd.to_datetime(date) for date in s.unique()}
return s.map(dates)
def remove_redact(text):
return re.sub('XX/?','',text)
def bow_vectorize(text_series,max_df):
cv = CountVectorizer(stop_words='english',min_df=0.01,max_df=max_df,ngram_range=(1,6), max_features = 15000)
m = cv.fit_transform(text_series)
return {'cv':cv,'m':m}
def bow_conform(text_series,feature_names):
cv = CountVectorizer(stop_words='english',ngram_range=(1,6), max_features = 15000, vocabulary = feature_names)
m = cv.fit_transform(text_series)
return {'cv':cv,'m':m}
def tfidf_vectorize(bow_m,sublinear_tf):
tt = TfidfTransformer(norm='l2',smooth_idf=True,use_idf=True,sublinear_tf=sublinear_tf)
m = tt.fit_transform(bow_m)
return {'tt':tt,'m':m}
c = pd.read_csv('../data/Consumer_Complaints.csv')
#Take subset where complaint text is available
c = c[c['Consumer consent provided?'] == 'Consent provided']
#Convert yes/no strings to bool
for col in ['Consumer disputed?','Timely response?']:
c[col] = (c[col] == 'Yes')
#Convert datetimes
for col in ['Date received', 'Date sent to company']:
c[col] = dateLookup(c[col])
#All complaints in our subset are submitted via web and have consent provided.
c = c.drop(['Consumer consent provided?','Submitted via'],axis='columns')
#Convert all others, convert yes/no bools to int64
dataTypes = {
'Product':'category',
'Sub-product':'category',
'Issue':'category',
'Sub-issue':'category',
'Consumer complaint narrative':str,
'Company public response':str,
'Company':'category',
'State':'category',
'ZIP code':str,
'Tags':str,
'Company response to consumer':'category',
'Timely response?':'int64',
'Consumer disputed?':'int64',
'Complaint ID':'int64'
}
c = c.astype(dataTypes)
columnNames = {
'Date received':'date_received',
'Product':'product',
'Sub-product':'sub_product',
'Issue':'issue',
'Sub-issue':'sub_issue',
'Consumer complaint narrative':'text',
'Company public response':'pub_reply',
'Company':'company',
'State':'state',
'ZIP code':'zip_code',
'Tags':'tags',
'Date sent to company':'date_sent',
'Company response to consumer':'cust_reply',
'Timely response?':'timely_reply',
'Consumer disputed?':'disputed',
'Complaint ID':'ID'
}
c = c.rename(columns = columnNames)
b = c[c['product'] == 'Bank account or service'].copy()
#There are a few missing state entries, and not every complaint has a sub-product or sub-issue.
#Cleaning Up for Word Vectorization.
#Remove XXXXs
#Are numbers significant/helpful? Is there another format that might make them moreso?
#Strings of XXXX are used for redaction. These will need to be removed.
b.loc[:,'clean_text'] = b['text'].apply(remove_redact)
issueAliases = {
'Account opening, closing, or management':'Account',
'Deposits and withdrawals':'Transactions',
'Making/receiving payments, sending money':'Payments',
'Problems caused by my funds being low':'Low Funds',
'Using a debit or ATM card':'Card'
}
b['issue'] = b['issue'].map(issueAliases).copy()
issues = sorted(list(issueAliases.values()))
b = b.reset_index(drop=True)
b.index.name = 'Index'
b.to_csv('../Cleaned_Consumer_Complaints.csv')
p_issue_class = b.groupby(['product','issue'])['text'].count()
p_issue_class['Bank account or service'].plot(kind='barh',title = 'Complaints Per Issue, 04-2015 to 04-2017',color='black',figsize=(6,6))
plt.xlabel('# Of Complaints')
plt.ylabel('Issue')
plt.show()
a = b
a.index = b['date_received']
issue_counts = {issue:a[a['issue'] == issue]['issue'].resample('W').count() for issue in issues}
for issue, series in issue_counts.items():
plt.plot(series,color='black')
plt.title(issue + ', Weekly Complaints')
plt.xticks(rotation=70)
plt.ylim(0,120)
plt.ylabel('Total Weekly Complaints')
plt.xlabel('Date')
plt.show()
# We'll take a representative sampling of each issue's text.
np.random.seed(0)
issue_sample_size = 500;
issue_text_samples = {i:b[b['issue'] == i].sample(issue_sample_size)['clean_text'].reset_index(drop=True) for i in issues}
text_globs = pd.Series({i:' '.join(issue_text_samples[i]) for i in issues},index = issues)
glob_bow = bow_vectorize(text_globs,max_df=1.0)
#Sublinear tf would replace term frequency with 1 + log(tf); it is left disabled here (sublinear_tf=False).
glob_tfidf = tfidf_vectorize(glob_bow['m'],sublinear_tf=False)
bow_m = glob_bow['m'].todense()
tfidf_m = glob_tfidf['m'].todense()
feature_names = glob_bow['cv'].get_feature_names()
metrics = ['bow','tfidf']
comp_m = np.concatenate([row for i in range(bow_m.shape[0]) for row in
[bow_m[i,:],tfidf_m[i,:]]
])
comp_index = pd.MultiIndex.from_product([issues,metrics], names=['Issue', 'Metric'])
feature_index = pd.Index(feature_names,name='Features')
glob_df = pd.DataFrame(comp_m.T,index=feature_index, columns=comp_index)
plt.figure(figsize=(10,6))
plt.title("Distributions of Specificity Values for Each Issue")
for issue in issues:
sns.kdeplot(glob_df.loc[:,(issue,'tfidf')],shade=True,label=issue)
plt.ylabel('Frequency')
plt.xlabel('Specificity Values')
plt.show()
np.random.seed(47)
for issue in issues:
print()
print('\033[1m' + issue + ' Issues' + '\033[0m','\n')
it = glob_df.loc[:,(issue,['tfidf','bow'])]
it.columns = it.columns.get_level_values(1)
it.columns = ["Frequency","Specificity"]
unigrams = it.loc[[i for i in glob_df.index if len(i.split(' ')) == 1]].sort_values(by='Specificity',ascending=False)
bigrams = it.loc[[i for i in glob_df.index if len(i.split(' ')) == 2]].sort_values(by='Specificity',ascending=False)
it = it.sort_values(by=['Specificity'],ascending=False)
print('One Word Phrases')
print(unigrams.head(15))
print()
print('Two Word Phrases')
print(bigrams.head(10))
print()
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import train_test_split
example_set = b.reset_index(drop=True)[['clean_text','issue']]
#assemble training examples and labels for unbalanced set
#Make the vocabulary correspond to the one learned from the topic globs.
examples_bow = bow_conform(example_set['clean_text'],feature_names)
examples_tfidf = tfidf_vectorize(examples_bow['m'],sublinear_tf=True)
labels = example_set['issue']
#Take a balanced sampling for an alternative training set and an unbalanced/stratified test set
np.random.seed(42)
class_balance = np.array([len(example_set[example_set['issue'] == issue]) for issue in issues])
es_copy = example_set.copy()
training_samples = []
test_samples = []
smallest_class_total = np.min(class_balance)
test_frac = 0.33
training_size = int(np.floor(smallest_class_total*(1-test_frac)))
min_test_size = smallest_class_total-training_size
test_sample_sizes = zip(issues,np.floor(class_balance*(min_test_size/smallest_class_total)))
for issue in issues:
training_samples.append(es_copy[es_copy['issue'] == issue].sample(training_size,replace=False))
bal_training_set = pd.concat(training_samples)
es_copy = es_copy.drop(bal_training_set.index)
overlap_test=set(es_copy.index).intersection(set(bal_training_set.index))
for issue,test_size in test_sample_sizes:
test_samples.append(es_copy[es_copy['issue'] == issue].sample(int(test_size),replace=False))
strat_test_set = pd.concat(test_samples)
#assemble training examples and labels for balanced set
#Make the vocabulary correspond to the one learned from the topic globs.
bal_training_bow = bow_conform(bal_training_set['clean_text'],feature_names)
bal_training_tfidf = tfidf_vectorize(bal_training_bow['m'],sublinear_tf=True)
bal_training_labels = bal_training_set['issue']
strat_test_bow = bow_conform(strat_test_set['clean_text'],feature_names)
strat_test_tfidf = tfidf_vectorize(strat_test_bow['m'],sublinear_tf=True)
strat_test_labels = strat_test_set['issue']
bal_X_train,bal_y_train = bal_training_tfidf['m'], bal_training_labels
strat_X_test, strat_y_test = strat_test_tfidf['m'], strat_test_labels
#train a multinomial bayes classifier for both sets
bal_mnb_classifier = MultinomialNB()
bal_mnb_classifier.fit(X=bal_X_train,y=bal_y_train)
print()
predictions = bal_mnb_classifier.predict(X=strat_X_test)
confusion = np.matrix(metrics.confusion_matrix(strat_y_test,predictions))
rel_confusion = confusion/confusion.sum(axis=1)
rc_df = pd.DataFrame(rel_confusion,index=issues,columns=issues)
heat = sns.heatmap(rc_df,cmap='magma',annot=True)
heat.set(xlabel = 'Predicted Issue',ylabel = 'Actual Issue',title= 'Predicted vs Actual Issue')
plt.show()
print(metrics.classification_report(strat_y_test,predictions))
print('Num of training examples :',bal_mnb_classifier.class_count_)
| 0.465387 | 0.808067 |
```
import sys
sys.path.append("..")
import numpy as np
import pickle
from recnn.preprocessing import load_from_pickle
```
# W vs QCD
The original splits were made as 180k for training and 20k for test.
We ended up rebalancing the splits as 100k for training and 100k for test. This re-split is done in the last cell of `03-preprocessing`.
```
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-qcd.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-w.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(90000):
X_train.append(background[i])
y_train.append(0)
for i in range(90000):
X_train.append(signal[i])
y_train.append(1)
for i in range(90000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(90000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/antikt-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/antikt-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
# soft
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-soft-qcd.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-soft-w.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(90000):
X_train.append(background[i])
y_train.append(0)
for i in range(90000):
X_train.append(signal[i])
y_train.append(1)
for i in range(90000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(90000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/antikt-soft-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/antikt-soft-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
# delphes data
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-qcd-delphes.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-w-delphes.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(90000):
X_train.append(background[i])
y_train.append(0)
for i in range(90000):
X_train.append(signal[i])
y_train.append(1)
for i in range(90000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(90000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/antikt-delphes-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/antikt-delphes-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
# images data
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/images-qcd.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/images-w.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(50000):
X_train.append(background[i])
y_train.append(0)
for i in range(50000):
X_train.append(signal[i])
y_train.append(1)
for i in range(50000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(50000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/images-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=2)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/images-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=2)
fd.close()
# event-level data
fd_background = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-qcd-event.pickle", "rb")
fd_signal = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-w-event.pickle", "rb")
# fd_background = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-delphes-qcd-event.pickle", "rb")
# fd_signal = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-delphes-w-event.pickle", "rb")
fd_train = open("../data/w-vs-qcd/anti-kt/antikt-event-train.pickle", "wb")
# fd_train = open("../data/w-vs-qcd/anti-kt/antikt-delphes-event-train.pickle", "wb")
for i in range(40000):
event = pickle.load(fd_background)
pickle.dump((event, 0), fd_train, protocol=2)
event = pickle.load(fd_signal)
pickle.dump((event, 1), fd_train, protocol=2)
fd_train.close()
fd_test = open("../data/w-vs-qcd/anti-kt/antikt-event-test.pickle", "wb")
# fd_test = open("../data/w-vs-qcd/anti-kt/antikt-delphes-event-test.pickle", "wb")
for i in range(10000):
event = pickle.load(fd_background)
pickle.dump((event, 0), fd_test, protocol=2)
event = pickle.load(fd_signal)
pickle.dump((event, 1), fd_test, protocol=2)
fd_test.close()
```
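The cells above repeat the same assemble-and-pickle pattern with different sizes and output paths; a small helper along these lines (a sketch, not code from the original project) could replace the copy-pasted loops:
```python
def dump_split(background, signal, n_train, n_total, train_path, test_path,
               protocol=pickle.HIGHEST_PROTOCOL):
    """Write (X, y) train/test pickles: background gets label 0, signal label 1."""
    X_train = [background[i] for i in range(n_train)] + [signal[i] for i in range(n_train)]
    y_train = [0] * n_train + [1] * n_train
    X_test = [background[i] for i in range(n_train, n_total)] + [signal[i] for i in range(n_train, n_total)]
    y_test = [0] * (n_total - n_train) + [1] * (n_total - n_train)
    with open(train_path, "wb") as fd:
        pickle.dump((X_train, y_train), fd, protocol=protocol)
    with open(test_path, "wb") as fd:
        pickle.dump((X_test, y_test), fd, protocol=protocol)

# The first split above would then become:
# dump_split(background, signal, 90000, 100000,
#            "../data/w-vs-qcd/anti-kt/antikt-train.pickle",
#            "../data/w-vs-qcd/anti-kt/antikt-test.pickle")
```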
|
github_jupyter
|
import sys
sys.path.append("..")
import numpy as np
import pickle
from recnn.preprocessing import load_from_pickle
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-qcd.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-w.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(90000):
X_train.append(background[i])
y_train.append(0)
for i in range(90000):
X_train.append(signal[i])
y_train.append(1)
for i in range(90000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(90000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/antikt-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/antikt-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
# soft
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-soft-qcd.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-soft-w.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(90000):
X_train.append(background[i])
y_train.append(0)
for i in range(90000):
X_train.append(signal[i])
y_train.append(1)
for i in range(90000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(90000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/antikt-soft-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/antikt-soft-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
# delphes data
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-qcd-delphes.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-w-delphes.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(90000):
X_train.append(background[i])
y_train.append(0)
for i in range(90000):
X_train.append(signal[i])
y_train.append(1)
for i in range(90000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(90000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/antikt-delphes-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/antikt-delphes-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=pickle.HIGHEST_PROTOCOL)
fd.close()
# images data
background = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/images-qcd.pickle", 100000)
signal = load_from_pickle("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/images-w.pickle", 100000)
X_train = []
y_train = []
X_test = []
y_test = []
for i in range(50000):
X_train.append(background[i])
y_train.append(0)
for i in range(50000):
X_train.append(signal[i])
y_train.append(1)
for i in range(50000, 100000):
X_test.append(background[i])
y_test.append(0)
for i in range(50000, 100000):
X_test.append(signal[i])
y_test.append(1)
fd = open("../data/w-vs-qcd/anti-kt/images-train.pickle", "wb")
pickle.dump((X_train, y_train), fd, protocol=2)
fd.close()
fd = open("../data/w-vs-qcd/anti-kt/images-test.pickle", "wb")
pickle.dump((X_test, y_test), fd, protocol=2)
fd.close()
# event-level data
fd_background = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-qcd-event.pickle", "rb")
fd_signal = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-w-event.pickle", "rb")
# fd_background = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-delphes-qcd-event.pickle", "rb")
# fd_signal = open("/home/gilles/gdrive/research/sandbox/learning-qcd-rnn/data/w-vs-qcd/anti-kt/antikt-delphes-w-event.pickle", "rb")
fd_train = open("../data/w-vs-qcd/anti-kt/antikt-event-train.pickle", "wb")
# fd_train = open("../data/w-vs-qcd/anti-kt/antikt-delphes-event-train.pickle", "wb")
for i in range(40000):
event = pickle.load(fd_background)
pickle.dump((event, 0), fd_train, protocol=2)
event = pickle.load(fd_signal)
pickle.dump((event, 1), fd_train, protocol=2)
fd_train.close()
fd_test = open("../data/w-vs-qcd/anti-kt/antikt-event-test.pickle", "wb")
# fd_test = open("../data/w-vs-qcd/anti-kt/antikt-delphes-event-test.pickle", "wb")
for i in range(10000):
event = pickle.load(fd_background)
pickle.dump((event, 0), fd_test, protocol=2)
event = pickle.load(fd_signal)
pickle.dump((event, 1), fd_test, protocol=2)
fd_test.close()
| 0.07029 | 0.416797 |
# Evolutionary Programming
The **Pyristic** library includes a class called `EvolutionaryProgramming`, inspired by the *Evolutionary Programming* (EP) metaheuristic, for solving minimization problems. Working with this class requires the following:
1. Define:
    * The objective function $f$.
    * The list of constraints.
    * The lists of lower and upper bounds.
    * The configuration of the metaheuristic's operators (optional).
2. Create a class that inherits from `EvolutionaryProgramming`.
3. Override the following functions:
    * mutation_operator (optional)
    * adaptive_mutation (optional)
    * survivor_selection (optional)
    * initialize_step_weights (optional)
    * initialize_population (optional)
    * fixer (optional)
The elements that need to be imported are shown below.
### External libraries
```
from pprint import pprint
import math
import numpy as np
import copy
from IPython.display import Image
from IPython.core.display import HTML
```
### Components of `pyristic`
The library is organized as follows:
* The metaheuristics are located in `heuristic`.
* The test functions are located in `utils.test_function`.
* The helper classes that hold the information about the operators used by the metaheuristics based on evolutionary computation paradigms are located in `utils.helpers`.
* The metaheuristics based on evolutionary computation paradigms rely on a set of operators (selection, mutation and crossover). These operators are located in `utils.operators`.
To demonstrate the use of our metaheuristic based on *evolutionary programming*, we have to import the class called `EvolutionaryProgramming`, located in `heuristic.EvolutiveProgramming_search`.
```
from pyristic.heuristic.EvolutiveProgramming_search import EvolutionaryProgramming
from pyristic.utils.helpers import EvolutionaryProgrammingConfig, get_stats, ContinuosFixer
from pyristic.utils.test_function import beale_, ackley_
from pyristic.utils.operators import mutation, selection
```
## Beale function
\begin{equation}
\label{eq:BF}
\begin{array}{rll}
\text{minimize:} & f(x_1, x_2) = (1.5 - x_1 + x_1x_2)^2 + (2.25 - x_1 + x_1x_2^2)^2 + (2.625 - x_1 + x_1x_2^3)^2
& \\
\text{subject to: } & -4.5 \leq x_1,x_2 \leq 4.5 &
\end{array}
\end{equation}
The global minimum is at $x^* = (3, 0.5)$ and $f(x^*) = 0$.
```
Image(filename="include/beale.png", width=500, height=300)
```
To initialize an object of the `EvolutionaryProgramming` class, the following elements need to be defined.
#### Objective function
```
def f(x : np.ndarray) -> float:
a = (1.5 - x[0] + x[0]*x[1])**2
b = (2.25 - x[0] + x[0]*x[1]**2)**2
c = (2.625 - x[0] + x[0]*x[1]**3)**2
return a+b+c
```
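As a quick sanity check, evaluating `f` at the known optimum recovers the stated minimum:
```python
# f evaluated at the known optimum x* = (3, 0.5) should be 0.
f(np.array([3.0, 0.5]))  # 0.0
```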
#### Constraints
The constraints are defined as a list of functions returning boolean values; these values indicate whether a solution is feasible. For the Beale function, there are only box constraints.
```
def is_feasible(x : np.ndarray) -> bool:
for i in range(len(x)):
if -4.5>x[i] or x[i] > 4.5:
return False
is_feasible.__doc__="x1: -4.5 <= {:.2f} <= 4.5 \n x2: -4.5 <= {:.2f} <= 4.5".format(x[0],x[1])
return True
constraints_ = [is_feasible]
```
#### Bounds of the problem variables
The bounds of the decision variables are given as a matrix (a list of lists) of size (2 x $n$), where $n$ is the number of variables. The first row contains the lower bounds of each decision variable and the second row the upper bounds.
```
lower_bound = [-4.5,-4.5]
upper_bound = [4.5, 4.5]
bounds = [lower_bound, upper_bound]
```
*Note:* When all the decision variables share the same search range, a single list with two numeric values can be used instead, where the first value is the lower bound and the second the upper bound.
For example, the Beale function is a function of two variables ($x_{1}, x_{2}$). Both variables are bounded to the same search space $ -4.5 < x_{i} < 4.5, i \in [1,2]$, so instead of the representation described above we can use:
``` python
bounds = [-4.5, 4.5]
```
In this representation, the first component is the lower bound, while the second component is the upper bound. This array is interpreted as the search space for all the decision variables.
The **Pyristic** library ships with several test problems in `utils.test_function`, among them the Beale function. The test problems are defined as dictionaries with the following keys:
* `function.` Objective function.
* `constraints.` Constraints of the problem, a list with at least one function returning a boolean value.
* `bounds.` Bounds for each of the problem variables. When all variables share the same search interval, a list with two numeric values can be used.
* `decision_variables.` Number of decision variables.
```
beale_
```
### Declaring `EvolutionaryProgramming`
The EP metaheuristic implemented in the **Pyristic** library can be used in the following ways:
* Create a class that inherits from the `EvolutionaryProgramming` class and override the functions mentioned above.
* Declare an object of type `EvolutionaryProgrammingConfig` and adjust the operators.
* A combination of the two above.
It is worth noting that `EvolutionaryProgramming` can be used without modifying its default operators.
### Running the metaheuristic
As mentioned before, one way to use the metaheuristic is to create an instance of the `EvolutionaryProgramming` class, keeping its default configuration.
The arguments that must be provided at initialization are:
* the objective function
* the problem constraints
* the lower and upper bounds (for each decision variable)
* the number of decision variables of the problem.
```
Beale_optimizer = EvolutionaryProgramming(**beale_)
```
Recall that `beale_` is a dictionary with the information required by the constructor of the `EvolutionaryProgramming` class.
Finally, the `optimize` function is called. It receives the following parameters:
* **generations**. Number of generations (iterations of the metaheuristic).
* **size_population**. Population size (number of individuals).
* **verbose**. Reports which iteration the search is in; True by default.
* **\**kwargs**. External arguments passed to the search.
To solve the Beale function we will use the following parameters:
* **generations** = $200$
* **size_population** = $100$
* **verbose** = True.
```
Beale_optimizer.optimize(200,100)
print(Beale_optimizer)
```
To examine the behavior of the metaheuristic on a given problem, the **Pyristic** library provides a function called `get_stats`. This function is located in `utils.helpers` and receives as parameters:
* The object that performs the search for solutions.
* The number of times the metaheuristic should be executed.
* The arguments received by the `optimize` function (must be a tuple).
* Additional arguments to the search, contained in a dictionary (optional).
The `get_stats` function returns a dictionary with some statistics of the runs.
```
args = (700, 100, False)
statistics = get_stats(Beale_optimizer, 21, args)
pprint(statistics)
```
## Ackley function
\begin{equation}
\min f(\vec{x}) = -20\exp \left( -0.2 \sqrt{\frac{1}{n} \sum_{i=1}^n x_i^2} \right)
- \exp \left( \frac{1}{n} \sum_{i=1}^n \cos (2\pi x_i) \right)
+ 20 + e
\end{equation}
The global minimum is at $x^* = 0$ with $f(x^*) = 0$, and the domain is $|x_{i}| < 30$.
```
Image(filename="include/ackley.jpg", width=500, height=300)
ackley_
```
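As with Beale, we can sanity-check the test problem at its known optimum; this is a sketch that assumes `ackley_['function']` accepts a NumPy array, like the Beale objective above:
```python
# The known optimum of Ackley is the origin; the value there should be ~0
# (up to floating-point error).
ackley_['function'](np.zeros(ackley_['decision_variables']))
```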
Since the Ackley test problem does not fix the number of decision variables, we can change the default number of decision variables as follows:
```python
ackley_['decision_variables'] = 5
```
To solve the Ackley function with 10 decision variables we will use the following parameters:
* **generations** = $500$
* **size_population** = $100$
```
Optimizer_by_default = EvolutionaryProgramming(**ackley_)
Optimizer_by_default.optimize(500,100)
print(Optimizer_by_default)
```
The solution found by EP is not the optimal solution. Below we show a run of the metaheuristic with the default configuration modified.
### Declaring `EvolutionaryProgramming` through a configuration
Next we show how to declare an object of type `EvolutionaryProgrammingConfig`, a helper class that holds the operators used during the execution of the metaheuristic.
```
configuration = (EvolutionaryProgrammingConfig()
.adaptive_mutation(
mutation.sigma_ep_adaptive_mutator(ackley_['decision_variables'], 2.0)
)
)
```
In this example we show the effect of initializing the step-size mutation operator with $\alpha = 2$; if this parameter is not provided, the constructor initializes it with $\alpha = 0.5$.
```
print(configuration)
Optimizer_by_configuration = EvolutionaryProgramming(**ackley_, config=configuration)
```
Unlike the Beale function example, we pass the configuration of the operators we want to use through the `config` argument when creating an object of the `EvolutionaryProgramming` class.
```
Optimizer_by_configuration.optimize(500,100)
print(Optimizer_by_configuration)
```
### Inheriting from *EvolutionaryProgramming*
Another way to use the metaheuristic in our library is to define a class that inherits from `EvolutionaryProgramming`, overriding the `adaptive_mutation` method so that different values of $\alpha$ can be supplied.
```
class EPAckley(EvolutionaryProgramming):
def __init__(self, function, decision_variables, constraints, bounds):
super().__init__(function, decision_variables, constraints, bounds)
def adaptive_mutation(self, **kwargs):
alpha_ = kwargs['alpha']
return mutation.sigma_ep_adaptive(self.logger['parent_population_sigma'], alpha_)
additional_arguments = {'alpha':2.0}
```
The dictionary we have defined contains the parameter used by the `adaptive_mutation` function.
```
Optimizer_by_class = EPAckley(**ackley_)
Optimizer_by_class.optimize(500,100,**additional_arguments)
print(Optimizer_by_class)
```
### Results
#### Using the default configuration
```
args = (500,100,False)
statistics = get_stats(Optimizer_by_default, 21, args)
pprint(statistics)
```
#### Passing the configuration that EP will use
```
args = (500,100,False)
statistics = get_stats(Optimizer_by_configuration, 21, args)
pprint(statistics)
```
#### Creating a class that inherits from *EvolutionaryProgramming*
```
args = (500,100,False)
statistics = get_stats(Optimizer_by_class, 21, args,**additional_arguments)
pprint(statistics)
```
The last two strategies use the same configuration; the only difference is how they were created, and the results obtained may vary because of the random numbers generated.
|
github_jupyter
|
from pprint import pprint
import math
import numpy as np
import copy
from IPython.display import Image
from IPython.core.display import HTML
from pyristic.heuristic.EvolutiveProgramming_search import EvolutionaryProgramming
from pyristic.utils.helpers import EvolutionaryProgrammingConfig, get_stats, ContinuosFixer
from pyristic.utils.test_function import beale_, ackley_
from pyristic.utils.operators import mutation, selection
Image(filename="include/beale.png", width=500, height=300)
def f(x : np.ndarray) -> float:
a = (1.5 - x[0] + x[0]*x[1])**2
b = (2.25 - x[0] + x[0]*x[1]**2)**2
c = (2.625 - x[0] + x[0]*x[1]**3)**2
return a+b+c
def is_feasible(x : np.ndarray) -> bool:
for i in range(len(x)):
if -4.5>x[i] or x[i] > 4.5:
return False
is_feasible.__doc__="x1: -4.5 <= {:.2f} <= 4.5 \n x2: -4.5 <= {:.2f} <= 4.5".format(x[0],x[1])
return True
constraints_ = [is_feasible]
lower_bound = [-4.5,-4.5]
upper_bound = [4.5, 4.5]
bounds = [lower_bound, upper_bound]
beale_
Beale_optimizer = EvolutionaryProgramming(**beale_)
Beale_optimizer.optimize(200,100)
print(Beale_optimizer)
args = (700, 100, False)
statistics = get_stats(Beale_optimizer, 21, args)
pprint(statistics)
Image(filename="include/ackley.jpg", width=500, height=300)
ackley_
ackley_['decision_variables'] = 5
Optimizer_by_default = EvolutionaryProgramming(**ackley_)
Optimizer_by_default.optimize(500,100)
print(Optimizer_by_default)
configuration = (EvolutionaryProgrammingConfig()
.adaptive_mutation(
mutation.sigma_ep_adaptive_mutator(ackley_['decision_variables'], 2.0)
)
)
print(configuration)
Optimizer_by_configuration = EvolutionaryProgramming(**ackley_, config=configuration)
Optimizer_by_configuration.optimize(500,100)
print(Optimizer_by_configuration)
class EPAckley(EvolutionaryProgramming):
def __init__(self, function, decision_variables, constraints, bounds):
super().__init__(function, decision_variables, constraints, bounds)
def adaptive_mutation(self, **kwargs):
alpha_ = kwargs['alpha']
return mutation.sigma_ep_adaptive(self.logger['parent_population_sigma'], alpha_)
additional_arguments = {'alpha':2.0}
Optimizer_by_class = EPAckley(**ackley_)
Optimizer_by_class.optimize(500,100,**additional_arguments)
print(Optimizer_by_class)
args = (500,100,False)
statistics = get_stats(Optimizer_by_default, 21, args)
pprint(statistics)
args = (500,100,False)
statistics = get_stats(Optimizer_by_configuration, 21, args)
pprint(statistics)
args = (500,100,False)
statistics = get_stats(Optimizer_by_class, 21, args,**additional_arguments)
pprint(statistics)
| 0.396652 | 0.922761 |
```
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
```
### Rahul's style ROC plot ###
```
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
#fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
#roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
#plt.figure()
from sklearn.metrics import roc_curve, auc
n_classes = 3
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
import matplotlib.pyplot as plt
lw = 2
fig, ax = plt.subplots()
ax.plot(fpr[2], tpr[2], color='#732673',
lw=lw, label='AUROC = %0.2f' % roc_auc[2])
ax.plot([0, 1], [0, 1], color='#505050', lw=lw, linestyle='--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_facecolor('#f7f7f7')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('outward', 5))
ax.spines['bottom'].set_position(('outward', 5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(b=True, which='major', color='white', linestyle='-', linewidth=3.0)
#ax.grid(b=True, which='minor', color='white', linestyle='-', linewidth=1)
ax.set_axisbelow(True)
ax.legend(loc='best', frameon=False)
plt.minorticks_on()
plt.rcParams.update({'font.size': 18})
plt.show()
outfile = "test.png"
fig.savefig(outfile)
plt.close(fig)
```
### Rahul's style for scatter plots ###
```
fig, ax = plt.subplots()
ax.scatter(X[0], X[1], color='#732673', alpha = 0.3)
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
ax.plot(lims, lims, alpha=0.75, zorder=0, color='#505050', linestyle='--', dashes = [2,2])
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
#ax.set_xlim([-0.02, 1.0])
#ax.set_ylim([-0.02, 1.05])
ax.set_xlabel('X Axis Label')
ax.set_ylabel('Y Axis Label')
ax.set_facecolor('#f7f7f7')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('outward', 5))
ax.spines['bottom'].set_position(('outward', 5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(b=True, which='major', color='white', linestyle='-', linewidth=3.0)
#ax.grid(b=True, which='minor', color='white', linestyle='-', linewidth=1)
ax.set_axisbelow(True)
#ax.legend(loc='best', frameon=False)
plt.minorticks_on()
plt.tight_layout()
plt.rcParams.update({'font.size': 18})
plt.show()
outfile = "test.png"
fig.savefig(outfile)
plt.close(fig)
```
|
github_jupyter
|
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
#fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
#roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
#plt.figure()
from sklearn.metrics import roc_curve, auc
n_classes = 3
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
import matplotlib.pyplot as plt
lw = 2
fig, ax = plt.subplots()
ax.plot(fpr[2], tpr[2], color='#732673',
lw=lw, label='AUROC = %0.2f' % roc_auc[2])
ax.plot([0, 1], [0, 1], color='#505050', lw=lw, linestyle='--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_facecolor('#f7f7f7')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('outward', 5))
ax.spines['bottom'].set_position(('outward', 5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(b=True, which='major', color='white', linestyle='-', linewidth=3.0)
#ax.grid(b=True, which='minor', color='white', linestyle='-', linewidth=1)
ax.set_axisbelow(True)
ax.legend(loc='best', frameon=False)
plt.minorticks_on()
plt.rcParams.update({'font.size': 18})
plt.show()
outfile = "test.png"
fig.savefig(outfile)
plt.close(fig)
fig, ax = plt.subplots()
ax.scatter(X[0], X[1], color='#732673', alpha = 0.3)
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
ax.plot(lims, lims, alpha=0.75, zorder=0, color='#505050', linestyle='--', dashes = [2,2])
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
#ax.set_xlim([-0.02, 1.0])
#ax.set_ylim([-0.02, 1.05])
ax.set_xlabel('X Axis Label')
ax.set_ylabel('Y Axis Label')
ax.set_facecolor('#f7f7f7')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('outward', 5))
ax.spines['bottom'].set_position(('outward', 5))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(b=True, which='major', color='white', linestyle='-', linewidth=3.0)
#ax.grid(b=True, which='minor', color='white', linestyle='-', linewidth=1)
ax.set_axisbelow(True)
#ax.legend(loc='best', frameon=False)
plt.minorticks_on()
plt.tight_layout()
plt.rcParams.update({'font.size': 18})
plt.show()
outfile = "test.png"
fig.savefig(outfile)
plt.close(fig)
| 0.795579 | 0.806014 |
# Working with Partitions
This document walks you through the most common ways that you might work with a GerryChain `Partition` object.
```
import geopandas
from gerrychain import Partition, Graph
from gerrychain.updaters import cut_edges
```
We'll use our [Pennsylvania VTD shapefile](https://github.com/mggg-states/PA-shapefiles) to create the graph we'll use in these examples.
```
df = geopandas.read_file("https://github.com/mggg-states/PA-shapefiles/raw/master/PA/PA_VTD.zip")
df.set_index("GEOID10", inplace=True)
graph = Graph.from_geodataframe(df)
graph.add_data(df)
```
## Creating a partition
Here is how you can create a Partition:
```
partition = Partition(graph, "2011_PLA_1", {"cut_edges": cut_edges})
```
The `Partition` class takes three arguments to create a Partition:
- A **graph**.
- An **assignment of nodes to districts**. This can be the string name of a node attribute (shapefile column) that holds each node's district assignment, or a dictionary mapping each node ID to its assigned district ID.
- A dictionary of **updaters**.
This creates a partition of the `graph` object we created above from the Pennsylvania shapefile. The partition is defined by the `"2011_PLA_1"` column from our shapefile's attribute table.
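For example, the dictionary form of the assignment can be built from the same column; this is a small sketch that relies on the node attributes attached earlier by `graph.add_data(df)`:
```python
# Equivalent construction using an explicit node -> district dictionary
# instead of the column name.
assignment_dict = {node: graph.nodes[node]["2011_PLA_1"] for node in graph.nodes}
partition_from_dict = Partition(graph, assignment_dict, {"cut_edges": cut_edges})
```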
## `partition.graph`: the underlying graph
You can access the partition's underlying Graph as `partition.graph`. This contains no information about the partition---it will be the same graph object that you passed in to `Partition()` when you created the partition instance.
`partition.graph` is a [`gerrychain.Graph`](https://gerrychain.readthedocs.io/en/latest/api.html#gerrychain.Graph) object. It is based on the NetworkX Graph object, so any functions (e.g. [`connected_components`](https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.components.connected_components.html#networkx.algorithms.components.connected_components)) you can find in the [NetworkX documentation](https://networkx.github.io/) will be compatible.
```
partition.graph
```
Now we have a graph of Pennsylvania's VTDs, with all of the data from our shapefile's attribute table attached to the graph as *node attributes*. We can see the data that a node has like this:
```
partition.graph.nodes['42039060']
```
The nodes of the graph are identified by IDs. Here the IDs are the VTDs GEOIDs from the `"GEOID10"` column from our shapefile.
## `partition.assignment`: assign nodes to parts
`partition.assignment` gives you a mapping from node IDs to part IDs ("part" is our generic word for "district"). It is a custom data structure but you can use it just like a dictionary.
```
first_ten_nodes = list(partition.graph.nodes)[:10]
for node in first_ten_nodes:
print(partition.assignment[node])
```
## `partition.parts`: the nodes in each part
`partition.parts` gives you a mapping from each part ID to the set of nodes that belong to that part. This is the "opposite" mapping of `assignment`.
As an example, let's print out the number of nodes in each part:
```
for part in partition.parts:
number_of_nodes = len(partition.parts[part])
print(f"Part {part} has {number_of_nodes} nodes")
```
Notice that `partition.parts` might not loop through the parts in numerical order---but it will always loop through the parts in the same order. (You can run the cell above multiple times to verify that the order doesn't change.)
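If a fixed ordering matters for reporting, you can sort the part IDs explicitly (a small sketch):
```python
# Iterate over parts in sorted order of their IDs.
for part in sorted(partition.parts):
    print(part, len(partition.parts[part]))
```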
## `partition.subgraphs`: the subgraphs of each part
For each part of our partition, we can look at the *subgraph* that it defines. That is, we can look at the graph made up of all the nodes in a certain part and all the edges between those nodes.
`partition.subgraphs` gives us a mapping (like a dictionary) from part IDs to their subgraphs. These subgraphs are NetworkX Subgraph objects, and work exactly like our main graph object---nodes, edges, and node attributes all work the same way.
```
for part, subgraph in partition.subgraphs.items():
number_of_edges = len(subgraph.edges)
print(f"Part {part} has {number_of_edges} edges")
```
Let's use NetworkX's [diameter](https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.distance_measures.diameter.html) function to compute the diameter of each part subgraph. (The *diameter* of a graph is the length of the longest shortest path between any two nodes in the graph. You don't have to know that!)
```
import networkx
for part, subgraph in partition.subgraphs.items():
diameter = networkx.diameter(subgraph)
print(f"Part {part} has diameter {diameter}")
```
## Outputs of updaters
The other main way we can extract information from `partition` is through the updaters that we configured when we created it. We gave `partition` just one updater, `cut_edges`. This is the set of edges that go between nodes that are in *different* parts of the partition.
```
len(partition["cut_edges"])
len(partition.cut_edges)
proportion_of_cut_edges = len(partition.cut_edges) / len(partition.graph.edges)
print("Proportion of edges that are cut:")
print(proportion_of_cut_edges)
```
|
github_jupyter
|
import geopandas
from gerrychain import Partition, Graph
from gerrychain.updaters import cut_edges
df = geopandas.read_file("https://github.com/mggg-states/PA-shapefiles/raw/master/PA/PA_VTD.zip")
df.set_index("GEOID10", inplace=True)
graph = Graph.from_geodataframe(df)
graph.add_data(df)
partition = Partition(graph, "2011_PLA_1", {"cut_edges": cut_edges})
partition.graph
partition.graph.nodes['42039060']
first_ten_nodes = list(partition.graph.nodes)[:10]
for node in first_ten_nodes:
print(partition.assignment[node])
for part in partition.parts:
number_of_nodes = len(partition.parts[part])
print(f"Part {part} has {number_of_nodes} nodes")
for part, subgraph in partition.subgraphs.items():
number_of_edges = len(subgraph.edges)
print(f"Part {part} has {number_of_edges} edges")
import networkx
for part, subgraph in partition.subgraphs.items():
diameter = networkx.diameter(subgraph)
print(f"Part {part} has diameter {diameter}")
len(partition["cut_edges"])
len(partition.cut_edges)
proportion_of_cut_edges = len(partition.cut_edges) / len(partition.graph.edges)
print("Proportion of edges that are cut:")
print(proportion_of_cut_edges)
| 0.310694 | 0.986258 |
```
cd ..
%matplotlib inline
%load_ext autoreload
%autoreload 2
import math
import copy
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
sns.set(color_codes=True)
max_iters = 50
import pickle
train_acc, test_acc, train_loss, test_loss, grad_means = pickle.load(open("output/metrics_0.001-wd_100-iter_grad-atk.p", "rb"))
train_acc, test_acc, train_loss, test_loss = train_acc[:max_iters], test_acc[:max_iters], train_loss[:max_iters], test_loss[:max_iters]
iters = list(range(len(train_acc)))
sns.set_style('white')
fontsize=14
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs[0][0].plot(iters, train_acc)
axs[0][0].set_xlim([0, len(train_acc)])
axs[0][0].set_ylim([0.6, 1])
axs[0][0].set_xlabel('No. iters', fontsize=fontsize)
axs[0][0].set_ylabel('Train Accuracy', fontsize=fontsize)
axs[0][1].plot(iters, test_acc)
axs[0][1].set_xlim([0, len(test_acc)])
axs[0][1].set_ylim([0.6, 1])
axs[0][1].yaxis.set_ticks([])
axs[0][1].set_xlabel('No. iters', fontsize=fontsize)
axs[0][1].set_ylabel('Test Accuracy', fontsize=fontsize)
axs[1][0].plot(iters, train_loss)
axs[1][0].set_xlim([0, len(train_loss)])
axs[1][0].set_ylim([0.6, 0.8])
axs[1][0].set_xlabel('No. iters', fontsize=fontsize)
axs[1][0].set_ylabel('Train Loss', fontsize=fontsize)
axs[1][1].plot(iters, test_loss)
axs[1][1].set_xlim([0, len(test_loss)])
axs[1][1].set_ylim([0.6, 1.5])
axs[1][1].yaxis.set_ticks([])
axs[1][1].set_xlabel('No. iters', fontsize=fontsize)
axs[1][1].set_ylabel('Test Loss', fontsize=fontsize)
def load_wide_grad():
train_acc, test_acc, train_loss, test_loss, grad_means = pickle.load(open("output/metrics_0.001-wd_100-iter_grad-atk.p", "rb"))
train_acc, test_acc, train_loss, test_loss = train_acc[:max_iters], test_acc[:max_iters], train_loss[:max_iters], test_loss[:max_iters]
iters = list(range(len(train_acc)))
return train_acc, test_acc, train_loss, test_loss, iters
def load_narrow_grad():
train_acc, test_acc, train_loss, test_loss, grad_means = pickle.load(open("output/metrics_0.0002-wd_100-iter_grad-atk.p", "rb"))
train_acc, test_acc, train_loss, test_loss = train_acc[:max_iters], test_acc[:max_iters], train_loss[:max_iters], test_loss[:max_iters]
iters = list(range(len(train_acc)))[:max_iters]
return train_acc, test_acc, train_loss, test_loss, iters
def load_narrow_svm():
train_acc, test_acc, train_loss, test_loss, grad_means = pickle.load(open("output/metrics_0.0002-wd_100-iter_sv-atk.p", "rb"))
train_acc, test_acc, train_loss, test_loss = train_acc[:max_iters], test_acc[:max_iters], train_loss[:max_iters], test_loss[:max_iters]
iters = list(range(len(train_acc)))[:max_iters]
return train_acc, test_acc, train_loss, test_loss, iters
sns.set_style('white')
fontsize=10
fig, axs = plt.subplots(4, 3, figsize=(5, 6))
train_acc_wide_grad, test_acc_wide_grad, train_loss_wide_grad, test_loss_wide_grad, iters_wide_grad = load_wide_grad()
train_acc_narrow_grad, test_acc_narrow_grad, train_loss_narrow_grad, test_loss_narrow_grad, iters_narrow_grad = load_narrow_grad()
train_acc_narrow_svm, test_acc_narrow_svm, train_loss_narrow_svm, test_loss_narrow_svm, iters_narrow_svm = load_narrow_svm()
# print(test_acc_narrow_grad[:50])
# print(test_acc_wide_grad[:25])
print(test_acc_narrow_svm[:25])
axs[0][0].plot(iters_wide_grad, train_acc_wide_grad)
axs[0][0].set_xlim([0, len(iters_wide_grad)])
axs[0][0].set_ylim([0.6, 0.9])
axs[0][0].xaxis.set_ticks([])
axs[0][0].set_ylabel('Train Accuracy', fontsize=fontsize)
axs[0][0].set_title('0.001-grad')
axs[0][1].plot(iters_narrow_grad, train_acc_narrow_grad)
axs[0][1].set_xlim([0, len(iters_narrow_grad)])
axs[0][1].set_ylim([0.6, 0.9])
axs[0][1].xaxis.set_ticks([])
axs[0][1].yaxis.set_ticks([])
axs[0][1].set_title('0.0002-grad')
axs[0][2].plot(iters_narrow_svm, train_acc_narrow_svm)
axs[0][2].set_xlim([0, len(iters_narrow_svm)])
axs[0][2].set_ylim([0.6, 0.9])
axs[0][2].xaxis.set_ticks([])
axs[0][2].yaxis.set_ticks([])
axs[0][2].set_title('0.0002-sv')
axs[1][0].plot(iters_wide_grad, test_acc_wide_grad)
axs[1][0].set_xlim([0, len(iters_wide_grad)])
axs[1][0].set_ylim([0.6, 0.8])
axs[1][0].set_ylabel('Test Accuracy', fontsize=fontsize)
axs[1][0].xaxis.set_ticks([])
axs[1][1].plot(iters_narrow_grad, test_acc_narrow_grad)
axs[1][1].set_xlim([0, len(iters_narrow_grad)])
axs[1][1].set_ylim([0.6, 0.8])
axs[1][1].xaxis.set_ticks([])
axs[1][1].yaxis.set_ticks([])
axs[1][2].plot(iters_narrow_svm, test_acc_narrow_svm)
axs[1][2].set_xlim([0, len(iters_narrow_svm)])
axs[1][2].set_ylim([0.6, 0.8])
axs[1][2].xaxis.set_ticks([])
axs[1][2].yaxis.set_ticks([])
axs[2][0].plot(iters_wide_grad, train_loss_wide_grad)
axs[2][0].set_xlim([0, len(iters_wide_grad)])
axs[2][0].set_ylim([0.25, 0.85])
axs[2][0].set_ylabel('Train Loss', fontsize=fontsize)
axs[2][0].xaxis.set_ticks([])
axs[2][1].plot(iters_narrow_grad, train_loss_narrow_grad)
axs[2][1].set_xlim([0, len(iters_narrow_grad)])
axs[2][1].set_ylim([0.25, 0.85])
axs[2][1].xaxis.set_ticks([])
axs[2][1].yaxis.set_ticks([])
axs[2][2].plot(iters_narrow_svm, train_loss_narrow_svm)
axs[2][2].set_xlim([0, len(iters_narrow_svm)])
axs[2][2].set_ylim([0.25, 0.85])
axs[2][2].xaxis.set_ticks([])
axs[2][2].yaxis.set_ticks([])
axs[3][0].plot(iters_wide_grad, test_loss_wide_grad)
axs[3][0].set_xlim([0, len(iters_wide_grad)])
axs[3][0].set_ylim([0.65, 0.85])
axs[3][0].set_ylabel('Test Loss', fontsize=fontsize)
axs[3][0].set_xlabel('No. iters', fontsize=fontsize)
axs[3][1].plot(iters_narrow_grad, test_loss_narrow_grad)
axs[3][1].set_xlim([0, len(iters_narrow_grad)])
axs[3][1].set_ylim([0.65, 0.85])
axs[3][1].yaxis.set_ticks([])
axs[3][1].set_xlabel('No. iters', fontsize=fontsize)
axs[3][2].plot(iters_narrow_svm, test_loss_narrow_svm)
axs[3][2].set_xlim([0, len(iters_narrow_svm)])
axs[3][2].set_ylim([0.65, 0.85])
axs[3][2].yaxis.set_ticks([])
axs[3][2].set_xlabel('No. iters', fontsize=fontsize)
plt.savefig("figs/svm-adv-atk_small.png", dpi=300, bbox_inches='tight')
import pickle
train_acc, test_acc, train_loss, test_loss, grad_means = pickle.load(open("output/metrics_0.0002-wd_100-iter_sv-atk.p", "rb"))
train_acc, test_acc, train_loss, test_loss = train_acc[:max_iters], test_acc[:max_iters], train_loss[:max_iters], test_loss[:max_iters]
iters = list(range(len(train_acc)))[:max_iters]
sns.set_style('white')
fontsize=14
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
axs[0][0].plot(iters, train_acc)
axs[0][0].set_xlim([0, len(train_acc)])
axs[0][0].set_ylim([0.6, 1])
axs[0][0].set_xlabel('No. iters', fontsize=fontsize)
axs[0][0].set_ylabel('Train Accuracy', fontsize=fontsize)
axs[0][1].plot(iters, test_acc)
axs[0][1].set_xlim([0, len(test_acc)])
axs[0][1].set_ylim([0.6, 1])
axs[0][1].yaxis.set_ticks([])
axs[0][1].set_xlabel('No. iters', fontsize=fontsize)
axs[0][1].set_ylabel('Test Accuracy', fontsize=fontsize)
axs[1][0].plot(iters, train_loss)
axs[1][0].set_xlim([0, len(train_loss)])
axs[1][0].set_ylim([0, 0.8])
axs[1][0].set_xlabel('No. iters', fontsize=fontsize)
axs[1][0].set_ylabel('Train Loss', fontsize=fontsize)
axs[1][1].plot(iters, test_loss)
axs[1][1].set_xlim([0, len(test_loss)])
axs[1][1].set_ylim([0, 0.8])
axs[1][1].yaxis.set_ticks([])
axs[1][1].set_xlabel('No. iters', fontsize=fontsize)
axs[1][1].set_ylabel('Test Loss', fontsize=fontsize)
```
# Investigate Model
```
import os
import imp
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import keras
from keras import backend as K
from keras.preprocessing import image
from keras.utils import plot_model
from keras import models
import geopandas as gpd
from contextlib import redirect_stdout
#-- Set up configurations / parameters
ndown = 4 # number of 'down' steps
ninit = 32 #number of channels to start with
dropout_frac = 0.2 # dropout fraction
ratio = 727 # penalization ratio for GL and non-GL points based on smaller dataset
mod_lbl = 'atrous' #'unet'
n_test = 500
if mod_lbl == 'unet':
mod_str = '{0}_{1}init_{2}down_drop{3:.1f}_customLossR{4}'.format(mod_lbl,ninit,ndown,
dropout_frac,ratio)
elif mod_lbl == 'atrous':
mod_str = '{0}_{1}init_drop{2:.1f}_customLossR{3}'.format(mod_lbl,ninit,dropout_frac,ratio)
else:
print('model label not matching.')
print(mod_str)
#-- Directory setup
current_dir = os.getcwd()
gdrive = os.path.expanduser('~/Google Drive File Stream')
colabdir = os.path.join(gdrive,'My Drive','Colab Notebooks')
ddir = os.path.join(gdrive,'Shared drives','GROUNDING_LINE_TEAM_DRIVE','ML_Yara','geocoded_v1')
test_dir = os.path.join(ddir,'test_n%i.dir'%n_test)
output_dir = os.path.expanduser('~/GL_learning_data/')
print(current_dir)
#-- Get list of images
fileList = os.listdir(test_dir)
fname = 'gl_069_181124-181130-181206-181212_013745-024816-013920-024991_T110456_T110456_x1536_y0512_DIR11'
file_ind = fileList.index('coco_%s.npy'%fname)
im = np.load(os.path.join(test_dir,fileList[file_ind]))
h,wi,ch = im.shape
#-- also read the corresponding shapefile
shpfile = os.path.join(output_dir,'geocoded_v1','stitched.dir','atrous_32init_drop0.2_customLossR727.dir',\
'shapefiles.dir','%s_6.0km.shp'%fname.split('_x')[0])
gdf = gpd.read_file(shpfile)
x,y = gdf['geometry'][4].coords.xy
x,y = np.array(x),np.array(y)
x1 = x.min() - 2e5
x2 = x.max() + 2e5
y1 = y.min() - 2e5
y2 = y.max() + 2e5
fig,ax = plt.subplots(1,3,figsize=(20,10))
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
world = world.to_crs(gdf.crs)
world.plot(ax=ax[0],color='lightgray', edgecolor='lightgray')
ax[0].set_xlim([-2700000,2700000])
ax[0].set_ylim([-2400000,2400000])
# ax[0].set_xlim([-2600000,1.6e6])
# ax[0].set_ylim([-2200000,1.6e6])
gdf.plot(ax=ax[0],color='red', edgecolor='red')
ax[0].plot([x1,x1,x2,x2,x1],[y1,y2,y2,y1,y1],color='cyan',linewidth=2)
ax[1].matshow(im[:,:,0])
#-- add scale bar
ax[2].set_xlim([0,wi])
ax[2].set_ylim([h,0])
ax[2].plot([10,10],[100,200],color='black',linewidth=2.)
ax[2].text(110,150,'10 km',horizontalalignment='center',\
verticalalignment='center', color='black')
ax[2].axis('off')
for i in range(3):
ax[i].set_aspect('equal')
fig= plt.figure(1,figsize=(5,5))
ax = fig.add_subplot(111)
world.plot(ax=ax, color='lightgray', edgecolor='lightgray')
ax.set_xlim([-2700000,2700000])
ax.set_ylim([-2400000,2400000])
gdf.plot(ax=ax,color='red', edgecolor='red')
ax.plot([x1,x1,x2,x2,x1],[y1,y2,y2,y1,y1],color='black',linewidth=3)
ax.set_aspect('equal')
ax.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(output_dir,'antarctic_inset_pipeline-figure.pdf'),format='PDF')
plt.close()
#-- Import model
mod_module = imp.load_source('nn_model',os.path.join(colabdir,'nn_model.py'))
#-- set up model
if mod_lbl == 'unet':
print('loading unet model')
model = mod_module.unet_model_double_dropout(height=h,width=wi,channels=ch,
n_init=ninit,n_layers=ndown,
drop=dropout_frac)
elif mod_lbl == 'atrous':
print("loading atrous model")
model = mod_module.nn_model_atrous_double_dropout(height=h,width=wi,
channels=ch,
n_filts=ninit,
drop=dropout_frac)
else:
print('Model label not correct.')
#-- define custom loss function
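#-- (a class-weighted binary cross-entropy: true grounding-line pixels are up-weighted
#--  by `ratio`, and the 1e-32 terms guard against log(0))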
def customLoss(yTrue,yPred):
return -1*K.mean(ratio*(yTrue*K.log(yPred+1e-32)) + ((1. - yTrue)*K.log(1-yPred+1e-32)))
#-- compile imported model
model.compile(loss=customLoss,optimizer='adam',
metrics=['accuracy'])
#-- checkpoint file
chk_file = os.path.join(output_dir,'{0}_weights.h5'.format(mod_str))
#-- if file exists, read model from file
if os.path.isfile(chk_file):
print('Check point exists; loading model from file.')
#-- load weights
model.load_weights(chk_file)
else:
sys.exit('Model does not previously exist.')
model.summary()
#-- save model architecture to file
with open(os.path.join(current_dir,'modelsummary.txt'), 'w') as f:
with redirect_stdout(f):
model.summary()
json_mod = model.to_json()
with open('modelsummary.json', 'w') as json_file:
json_file.write(json_mod)
yaml_mod = model.to_yaml()
with open('modelsummary.yaml', 'w') as yaml_file:
yaml_file.write(yaml_mod)
with open(os.path.join(current_dir,'modelsummary.json'), 'w') as f:
with redirect_stdout(f):
model.to_json()
for i in [0,4,15,16,21,24]:
print(i+1,model.layers[i].output_shape)
model.layers[21].output_shape[-1]
im[np.newaxis, ...].shape
layer_outputs = [layer.output for layer in model.layers[:26]]
#-- extract output of first 22 layers
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(im[np.newaxis, ...])
activations[13].shape
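#-- helper: a point slightly inset from the axes corner, used to place the panel-letter annotations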
def get_axis_limits(ax):
xinc = np.abs(ax.get_xlim()[1]-ax.get_xlim()[0])*0.05
yinc = np.abs(ax.get_ylim()[1]-ax.get_ylim()[0])*0.12
return ax.get_xlim()[0]+xinc, ax.get_ylim()[1]+yinc
#-- Set up figure
m,n = 4,4
fig,ax = plt.subplots(m,n,figsize=(10,9))
# gs1 = gridspec.GridSpec(m,n)
# gs1.update(wspace=0.0, hspace=0.0)
cmap = 'gnuplot2'
lbl = 65
bbox = dict(boxstyle="round", fc="0.8")
#-- first row: inputs
for i in [1,2]:
ax[0,i].matshow(activations[0][0,:,:,i-1],cmap=cmap)
ax[0,i].annotate(chr(lbl), xy=get_axis_limits(ax[0,i]),bbox=bbox)
ax[0,i].set_aspect('equal')
lbl += 1
ax[0,1].set_title('Real (Input)')
ax[0,2].set_title('Imaginary (Input)')
#-- set axes ticks
ax[0,0].axis('off')
ax[0,1].xaxis.set_ticks([0,len(activations[0][0,:,0,i-1])])
ax[0,1].yaxis.set_ticks([0,len(activations[0][0,0,:,i-1])])
ax[0,2].get_xaxis().set_visible(False)
ax[0,2].get_yaxis().set_visible(False)
ax[0,3].axis('off')
#-- add antarctic map (0,0)
world.plot(ax=ax[0,0],color='lightgray', edgecolor='lightgray')
ax[0,0].set_xlim([-2700000,2700000])
ax[0,0].set_ylim([-2400000,2400000])
gdf.plot(ax=ax[0,0],color='red', edgecolor='red')
ax[0,0].plot([x1,x1,x2,x2,x1],[y1,y2,y2,y1,y1],color='cyan',linewidth=2)
#-- add scale bar (0,3)
ax[0,3].set_xlim([0,wi])
ax[0,3].set_ylim([h,0])
ax[0,3].plot([10,10],[100,200],color='black',linewidth=2.)
ax[0,3].text(110,150,'10 km',horizontalalignment='center',\
verticalalignment='center', color='black')
ax[0,3].set_aspect('equal')
#-- plot other rows in a loop
for v,w in zip([1,2,3],[4,15,21]):
for i in range(n):
        #-- determine the channel number based on the total # of channels
nch = int(model.layers[w].output_shape[-1]/4)
ax[v,i].matshow(activations[w][0, :, :, i*nch + 1], cmap=cmap)
# fig.colorbar(p,ax=ax[v,i])
ax[v,i].annotate(chr(lbl),get_axis_limits(ax[v,i]),bbox=bbox)
ax[v,i].set_aspect('equal')
ax[v,i].set_title('Ch.%i'%(i*nch + 2))
lbl += 1
if i != 0:
ax[v,i].get_xaxis().set_visible(False)
ax[v,i].get_yaxis().set_visible(False)
else:
ax[v,i].xaxis.set_ticks([0,len(activations[w][0,:,0,i-1])])
ax[v,i].yaxis.set_ticks([0,len(activations[w][0,0,:,i-1])])
#-- label rows
# ax[0,0].set_ylabel('Input', size='large')
ax[1,0].set_ylabel('Layer 5', size='large')
ax[2,0].set_ylabel('Layer 16', size='large')
ax[3,0].set_ylabel('Layer 22', size='large')
# fig.suptitle('Activation Maps')
plt.tight_layout()
plt.subplots_adjust(wspace=0.0, hspace=0.2)
plt.savefig(os.path.join(output_dir,'geocoded_v1','Test_predictions.dir',\
'%s.dir'%mod_str,'activation_map_%s.pdf'%fname),format='PDF')
plt.close(fig)
```
```
from lime.lime_image import *
import pandas as pd
import yaml
import os
import datetime
import dill
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from visualization.visualize import visualize_explanation
from predict import predict_instance, predict_and_explain
from preprocess import remove_text
pd.set_option('display.max_colwidth', -1)
def setup_lime():
'''
Load relevant information and create a LIME Explainer
:return: dict containing important information and objects for explanation experiments
'''
# Load relevant constants from project config file
cfg = yaml.full_load(open("C:\\Users\\PaulDS3\\Downloads\\project\\covid_cxr\\config.yml", 'r'))
lime_dict = {}
lime_dict['NUM_SAMPLES'] = cfg['LIME']['NUM_SAMPLES']
lime_dict['NUM_FEATURES'] = cfg['LIME']['NUM_FEATURES']
lime_dict['IMG_PATH'] = cfg['PATHS']['IMAGES']
lime_dict['RAW_DATA_PATH'] = cfg['PATHS']['RAW_DATA']
lime_dict['IMG_DIM'] = cfg['DATA']['IMG_DIM']
lime_dict['PRED_THRESHOLD'] = cfg['PREDICTION']['THRESHOLD']
lime_dict['CLASSES'] = cfg['DATA']['CLASSES']
lime_dict['CLASS_MODE'] = cfg['TRAIN']['CLASS_MODE']
lime_dict['COVID_ONLY'] = cfg['LIME']['COVID_ONLY']
KERNEL_WIDTH = cfg['LIME']['KERNEL_WIDTH']
FEATURE_SELECTION = cfg['LIME']['FEATURE_SELECTION']
# Load train and test sets
lime_dict['TRAIN_SET'] = pd.read_csv(cfg['PATHS']['TRAIN_SET'])
lime_dict['TEST_SET'] = pd.read_csv(cfg['PATHS']['TEST_SET'])
# Create ImageDataGenerator for test set
test_img_gen = ImageDataGenerator(preprocessing_function=remove_text,
samplewise_std_normalization=True, samplewise_center=True)
test_generator = test_img_gen.flow_from_dataframe(dataframe=lime_dict['TEST_SET'], directory=cfg['PATHS']['RAW_DATA'],
x_col="filename", y_col='label_str', target_size=tuple(cfg['DATA']['IMG_DIM']), batch_size=1,
class_mode='categorical', validate_filenames=False, shuffle=False)
lime_dict['TEST_GENERATOR'] = test_generator
# Define the LIME explainer
lime_dict['EXPLAINER'] = LimeImageExplainer(kernel_width=KERNEL_WIDTH, feature_selection=FEATURE_SELECTION,
verbose=True)
dill.dump(lime_dict['EXPLAINER'], open(cfg['PATHS']['LIME_EXPLAINER'], 'wb')) # Serialize the explainer
# Load trained model's weights
lime_dict['MODEL'] = load_model(cfg['PATHS']['MODEL_TO_LOAD'], compile=False)
#print(lime_dict)
return lime_dict
def explain_xray(lime_dict, idx, save_exp=True):
'''
Make a prediction and provide a LIME explanation
:param lime_dict: dict containing important information and objects for explanation experiments
:param idx: index of image in test set to explain
:param save_exp: Boolean indicating whether to save the explanation visualization
'''
# Get i'th preprocessed image in test set
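    # (batch_size is 1, so advancing the generator idx+1 times leaves the idx-th test image in x)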
lime_dict['TEST_GENERATOR'].reset()
for i in range(idx + 1):
x, y = lime_dict['TEST_GENERATOR'].next()
# entered .astype('double')
x = np.squeeze(x.astype('double'), axis=0)
#print(x.astype('double'))
# Get the corresponding original image (no preprocessing)
orig_img = cv2.imread(lime_dict['RAW_DATA_PATH'] + lime_dict['TEST_SET']['filename'][idx])
new_dim = tuple(lime_dict['IMG_DIM'])
orig_img = cv2.resize(orig_img, new_dim, interpolation=cv2.INTER_NEAREST) # Resize image
# Make a prediction for this image and retrieve a LIME explanation for the prediction
start_time = datetime.datetime.now()
#print('is this it?')
#print(lime_dict['MODEL'], lime_dict['EXPLAINER'], lime_dict['NUM_FEATURES'], lime_dict['NUM_SAMPLES'])
explanation, probs = predict_and_explain(x, lime_dict['MODEL'], lime_dict['EXPLAINER'], lime_dict['NUM_FEATURES'], lime_dict['NUM_SAMPLES'])
print("Explanation time = " + str((datetime.datetime.now() - start_time).total_seconds()) + " seconds")
# Get image filename and label
img_filename = lime_dict['TEST_SET']['filename'][idx]
label = lime_dict['TEST_SET']['label'][idx]
# Rearrange prediction probability vector to reflect original ordering of classes in project config
probs = [probs[0][lime_dict['CLASSES'].index(c)] for c in lime_dict['TEST_GENERATOR'].class_indices]
# Visualize the LIME explanation and optionally save it to disk
if save_exp:
file_path = lime_dict['IMG_PATH']
else:
file_path = None
if lime_dict['COVID_ONLY'] == True:
label_to_see = lime_dict['TEST_GENERATOR'].class_indices['COVID-19']
else:
label_to_see = 'top'
_ = visualize_explanation(orig_img, explanation, img_filename, label, probs, lime_dict['CLASSES'], label_to_see=label_to_see,
dir_path=file_path)
return
if __name__ == '__main__':
lime_dict = setup_lime()
i = 4 # originally 0 # Select i'th image in test set
explain_xray(lime_dict, i, save_exp=True) # Generate explanation for image
'rsna\\0ca3f31a-ace5-4bd2-9900-b152cc212c8e.jpg'.split('\\')[-1].split('.')[0]
```
**420-A58-SF - Unsupervised Learning Algorithms - Summer 2021 - Technical Specialization in Artificial Intelligence**<br/>
MIT License - Copyright (c) 2021 Mikaël Swawola
<br/>

<br/>
**Objective: This lab session consists of implementing the Apriori algorithm for learning association rules on the small PanierEpicerie dataset**
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
## 1 - Reading the dataset
```
import numpy as np
import pandas as pd
from helpers import print_itemsets, print_rules
```
**Exercise 1-1 - Using the Pandas library, read the data file `PanierEpicerie.csv`**
```
# Complete this cell ~ 1-2 lines of code
```
**Exercise 1-2 - How many items and how many baskets does this dataset contain?**
```
# Your answer here
```
**Exercise 1-3 - Convert the dataset into a list of transactions (or baskets). Each basket is a list of items (articles)**
```
# Complete this cell ~ 1-2 lines of code
```
## 2 - The Mlxtend library
The [Mlxtend](http://rasbt.github.io/mlxtend/) (machine learning extensions) library provides an implementation of the Apriori algorithm, so we will put several of its features to work.
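As a point of reference before the exercises, here is a minimal, hypothetical usage sketch of the Mlxtend pieces involved. It runs on a toy transaction list rather than on the PanierEpicerie data, so it does not give the answers away:
```
# Sketch only: toy transactions, not the exercise dataset
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

toy = [['bread', 'milk'], ['bread', 'butter'], ['milk', 'butter', 'bread']]

te = TransactionEncoder()
onehot = pd.DataFrame(te.fit_transform(toy), columns=te.columns_)  # boolean item matrix

frequent = apriori(onehot, min_support=0.5, use_colnames=True)     # frequent itemsets
rules = association_rules(frequent, metric="confidence", min_threshold=0.6)
print(frequent)
print(rules[['antecedents', 'consequents', 'support', 'confidence']])
```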
**Exercise 2-1 - Using the [TransactionEncoder](http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/) class, encode the list of transactions into the format required by Mlxtend**
```
# Complete this cell ~ 3-4 lines of code
```
**Exercise 2-2 - Identify the frequent itemsets for a support of 20%. Refer to the Mlxtend documentation to find the required class**
```
# Complete this cell ~ 2-3 lines of code
```
**Exercise 2-3 - In the same way, now identify the association rules with a confidence of 0.3**
```
# Complete this cell ~ 2-3 lines of code
```
## 3 - Implementing the Apriori algorithm (optional)
The code below is a simple, basic (almost naive) implementation of the Apriori algorithm.
**Exercise 3-1 - Using the material covered in class, complete the various methods of the `Apriori` class and reproduce the results of exercise 2. Only the `generate_L` method is given to you. Helper functions for display, `print_itemsets` and `print_rules`, are imported**
```
class Apriori:
def __init__(self, transactions, min_support, min_confidence):
self.transactions = transactions # Baskets
        self.min_support = min_support # Minimum support threshold
        self.min_confidence = min_confidence # Minimum confidence
self.support_data = {}
def create_C1(self):
        # Complete the code below ~ 2-5 lines of code
return C1
def create_Ck(self, k, Lkprev):
        # Complete the code below ~ 15-20 lines of code
return Ck
def generate_Lk_from_Ck(self, Ck):
        # Complete the code below ~ 15-20 lines of code
return Lk
def generate_L(self):
"""
        Generate all frequent itemsets
        Input:
            None
        Output:
            L: list of the Lk collections.
"""
self.support_data = {}
C1 = self.create_C1()
L1 = self.generate_Lk_from_Ck(C1)
Lksub1 = L1.copy()
L = []
L.append(Lksub1)
i = 2
while True:
Ci = self.create_Ck(i, Lksub1)
Li = self.generate_Lk_from_Ck(Ci)
if Li:
Lksub1 = Li.copy()
L.append(Lksub1)
i += 1
else:
break
return L
def generate_rules(self):
L = self.generate_L()
        # Complete the code below ~ 15-20 lines of code
return big_rule_list
model = Apriori(transactions, min_support=0.2, min_confidence=0.3)
L = model.generate_L()
print_itemsets(L, model)
rule_list = model.generate_rules()
print_rules(rule_list)
```
## End of the lab
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/take_all.png" height="300" width="1200">
# <center> Data Science <br> <br> Collecting data in Python </center>
## Agenda
* The very basics
* What to do if the server gets angry
* What an API is
* What Selenium is
* Tricks
# 1. The very basics
To absorb the very basics, read [this little Habr article.](https://habr.com/ru/company/ods/blog/346632/) One of the seminar instructors is among its co-authors, which sort of hints that the content is decent.
## Why collect data automatically?
<br>
<br>
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/aaaaaa.png" width="500">
## What is HTML?
**HTML (HyperText Markup Language)** is a markup language just like Markdown or LaTeX. It is the standard language for building web sites. Commands in this language are called **tags**. Open absolutely any site, right-click, then choose `View page source`, and the HTML skeleton of that site will appear before you.
An HTML page is nothing but a set of nested tags. You can spot, for example, the following tags:
- `<title>` – page title
- `<h1>…<h6>` – headings of different levels
- `<p>` – paragraph
- `<div>` – marks out a fragment of the document in order to change how its content looks
- `<table>` – draws a table
- `<tr>` – separator for table rows
- `<td>` – separator for table columns
- `<b>` – sets a bold font
Usually the command `<...>` opens a tag and `</...>` closes it. Everything between these two commands obeys the rule the tag dictates. For example, everything between `<p>` and `</p>` is a separate paragraph.
Tags form a kind of tree rooted at the `<html>` tag and split the page into different logical pieces. Each tag has descendants (children), the tags nested inside it, and its own parents.
For example, the HTML tree of a page may look like this:
````
<html>
<head> Title </head>
<body>
<div>
First chunk of text with its own properties
</div>
<div>
Second chunk of text
<b>
Third, bold chunk
</b>
</div>
Fourth chunk of text
</body>
</html>
````
You can work with this HTML as plain text, or as a tree. Traversing that tree is exactly what parsing a web page means. We will simply locate the nodes we need among all this variety and pull the information out of them!
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/tree.png" width="450">
## Scraping book prices
* We want to collect [book prices](http://books.toscrape.com)
* Doing it by hand takes too long, so let's write some Python code
Access to web pages is provided by the requests module. Let's load it. If you don't have this module installed, you will have to pull yourself together and install it: `pip install requests`.
```
import requests
url = 'http://books.toscrape.com/catalogue/page-1.html'
response = requests.get(url)
response
```
The blessed 200 response: the connection was established and the data received, everything is wonderful! If you try to go to a non-existent page, you can get, for example, the famous 404 error.
```
requests.get('http://books.toscrape.com/big_scholarship')
```
Inside `response` lies the HTML markup of the page we are parsing.
```
response.content[:1000]
```
It looks indigestible; how about cooking something prettier out of it? A beautiful soup, for example.
<img align="center" src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/alisa.jpg" height="200" width="200">
The **[`bs4`](https://www.crummy.com/software/BeautifulSoup/)** package, a.k.a. **BeautifulSoup**, was named after the poem about beautiful soup from Alice in Wonderland. It is a thoroughly magical library that turns the raw, unprocessed HTML (or XML) code of a page into a structured collection of data in which it is very convenient to search for the tags, classes, attributes, texts and other web-page elements you need.
> The package called `BeautifulSoup` is most likely not what you need. That is the third version (*Beautiful Soup 3*), while we will be using the fourth. So we need the `beautifulsoup4` package. To make things extra fun, when importing it you have to give a different package name, `bs4`, and import a function called `BeautifulSoup`. In short, it is easy to get confused at first, but you only have to get over these difficulties once, and afterwards it gets easier.
```
from bs4 import BeautifulSoup
# parse the page into a tree
tree = BeautifulSoup(response.content, 'html.parser')
```
The variable `tree` now holds a tree of tags that we can wander around completely freely.
```
tree.html.head.title
```
We can pull the text out of whatever place we have wandered into using `text`.
```
tree.html.head.title.text
```
The text can then be handled with the classic Python string methods. For example, we can get rid of the extra whitespace.
```
tree.html.head.title.text.strip()
```
Moreover, if we know an element's address, we can find it straight away. For example, this is how we can locate in the page code exactly where the main information about each book lives. You can see that it sits inside an `article` tag that carries the class `product_pod` (roughly speaking, in HTML a class defines the styling of the corresponding piece of the page).
Let's pull the book info out of this tag.
```
books = tree.find_all('article', {'class' : 'product_pod'})
books[0]
```
The object we get back from the search also has the bs4 structure, so we can keep searching for the objects we need inside it.
```
type(books[0])
books[0].find('p', {'class': 'price_color'}).text
```
Note that there are at least two search methods: `find` and `find_all`. If several elements on the page match the given address, the `find` method returns only the very first one. To find every element with that address, use the `find_all` method; it returns a list.
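A tiny illustration of the difference, on the same page:
```
# find returns only the first matching tag, find_all returns a list of every match
first_price = tree.find('p', {'class': 'price_color'})      # a single Tag object
all_prices = tree.find_all('p', {'class': 'price_color'})   # a list of Tag objects
print(first_price.text, len(all_prices))
```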
Besides their contents, tags often have attributes. For example, the book title element has the attributes `title` and `href`:
```
books[0].h3
```
They can be pulled out too.
```
books[0].h3.a.get('href')
books[0].h3.a.get('title')
```
You can also search for the pieces of the page you care about by these attributes.
```
tree.find_all('a', {'title': 'A Light in the Attic'})
```
That is basically all there is to it.
Note that on the site the books are spread over different pages. If you click through them, you will notice that the `page` attribute changes in the URL. So if we want to collect all the books, we have to generate a bunch of URLs with different `page` values inside a loop. When you scrape more complex sites, the URL often carries a huge number of parameters that control what gets returned.
Let's wrap all the book-scraping code into a function. It will take as input the number of the page to download.
```
def get_page(p):
    # build the URL
    url = 'http://books.toscrape.com/catalogue/page-{}.html'.format(p)
    # fetch it
    response = requests.get(url)
    # build the tree
    tree = BeautifulSoup(response.content, 'html.parser')
    # find all the interesting bits in it
books = tree.find_all('article', {'class' : 'product_pod'})
infa = [ ]
for book in books:
infa.append({'price': book.find('p', {'class': 'price_color'}).text,
'href': book.h3.a.get('href'),
'title': book.h3.a.get('title')})
return infa
```
All that is left is to loop over every page from page-1 to page-50, and the data is in our pocket.
```
infa = []
for p in range(1,51):
infa.extend(get_page(p))
import pandas as pd
df = pd.DataFrame(infa)
print(df.shape)
df.head()
```
By the way, if you follow the link into the book itself, there is a pile of extra information about it there. You can walk through all the links and download that extra information for yourself.
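A rough sketch of how one might follow those links (this assumes the relative `href` values collected above and that each detail page keeps its product-information table):
```
# Sketch: open one book's page and pull a few extra fields from its product table
book_url = 'http://books.toscrape.com/catalogue/' + df['href'].iloc[0]
book_tree = BeautifulSoup(requests.get(book_url).content, 'html.parser')

rows = book_tree.find_all('tr')                      # product information table
extra = {row.th.text: row.td.text for row in rows}   # e.g. UPC, availability, taxes
extra
```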
# 2. What to do if the server gets angry
* You decided to collect a bit of data for yourself
* The server is not thrilled about being carpet-bombed with automated requests
* Error 403, 404, 504, $\ldots$
* Captchas, demands to register
* Caring messages that suspicious traffic has been detected coming from your device
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/doge.jpg" width="450">
## a) be patient
* Requests that come too often annoy the server
* Put time delays between them
```
import time
time.sleep(3) # and let the whole world wait 3 seconds
```
## b) look like a human
A normal person's request through a browser looks like this:
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/browser_get.png" width="600">
A pile of information reaches the server along with it! A request from Python looks like this:
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/python_get.jpg" width="250">
Notice the difference? Obviously our modest request is no match for the abundance of meta-information that travels with a request from a regular browser. Fortunately, nothing stops us from pretending to be human and throwing dust in the server's eyes by generating a fake user agent. There are very, very many libraries that handle this task; personally I like [fake-useragent](https://pypi.org/project/fake-useragent/) best. On each call it assembles a random combination of operating system, specifications and browser version out of various pieces, which you can then pass along with the request:
```
from fake_useragent import UserAgent
UserAgent().chrome
```
For example, https://knowyourmeme.com/ will not want to let Python in and will return a 403 error. A server returns this error when it is up and able to handle requests but refuses to do so for personal reasons of its own.
```
url = 'https://knowyourmeme.com/'
response = requests.get(url)
response
```
But if we generate a User-Agent, the server will have no questions.
```
response = requests.get(url, headers={'User-Agent': UserAgent().chrome})
response
```
__Another example:__ if you decide to scrape CIAN, it will start throwing captchas at you. One way around it is to rotate your IP through Tor. However, CIAN will show a captcha on practically every request made from behind Tor. If you add a `User-Agent` to the request, the captcha will pop up far less often.
## c) communicate through intermediaries
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/proxy.jpeg" width="400">
Let's look at our own IP address without a proxy.
```
r = requests.get('https://httpbin.org/ip')
print(r.json())
```
Now let's try to see what happens if we connect through a proxy.
```
proxies = {
'http': '182.53.206.47:47592',
'https': '182.53.206.47:47592'
}
r = requests.get('https://httpbin.org/ip', proxies=proxies)
print(r.json())
```
The request took a little longer, and the IP address changed. Most of the proxies you will find work poorly. Sometimes a request drags on for a very long time and it pays to drop it and try another proxy. This can be configured with the `timeout` option. For example, like this: if the server does not answer within one second, the code will fail.
```
import requests
requests.get('http://www.google.com', timeout=1)
```
requests has quite a few other interesting bells and whistles. You can look at them in the [advanced guide from the documentation.](https://requests.readthedocs.io/en/master/user/advanced/)
__Where you can try to get hold of proxy lists:__
* https://qna.habr.com/q/591069
* https://getfreeproxylists.blogspot.com/
* Most free proxies usually do not work. Write a parser that collects proxy lists and tries to apply them one after another (see the sketch below).
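A minimal sketch of that idea (the addresses below are placeholders, not working proxies):
```
# Sketch: walk through a list of candidate proxies until one answers quickly enough
candidates = ['182.53.206.47:47592', '103.21.163.76:6667']   # placeholder addresses

for address in candidates:
    proxies = {'http': address, 'https': address}
    try:
        r = requests.get('https://httpbin.org/ip', proxies=proxies, timeout=2)
        print('working proxy:', address, r.json())
        break
    except requests.exceptions.RequestException:
        continue  # dead or slow proxy, try the next one
```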
## d) go deeper
<center>
<img src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/tor.jpg" width="600">
You can try to get around angry servers through Tor. There are actually several ways to do it, but we will not talk about them here; better to read the details in the Habr article. The link to it is at the end of the notebook. There was one at the very beginning too. And there is [certainly one in the middle.](https://habr.com/ru/company/ods/blog/346632/)
## Combine everything?
1. Start small
2. If you keep getting banned, pile on new tricks
3. Every new trick costs you speed
4. [Various add-ons for requests](http://docs.python-requests.org/en/v0.10.6/user/advanced/)
# 3. API
__API (Application Programming Interface)__ is ready-made code that you can plug straight into your own code! Many services, including Google and VK, provide such ready-made building blocks for your development.
Examples:
* [VK API](https://vk.com/dev/methods)
* [Twitter API](https://developer.twitter.com/en/docs.html)
* [YouTube API](https://developers.google.com/youtube/v3/)
* [Google Maps API](https://developers.google.com/maps/documentation/)
* [Aviasales](https://www.aviasales.ru/API)
* [Yandex Translate](https://yandex.ru/dev/translate/)
APIs exist almost everywhere! In this seminar we will look at two examples: the VK API and Google Maps.
## 3.1 The VK API
Why you might need access to the VK API hardly needs explaining: a social network is tons of various useful information that you can put to your own purposes. [The documentation](https://vk.com/dev/manuals) describes in great detail how to work with the VK API and what it gets you.
But first you need to obtain access to the API. For that you will have to go through a couple of bureaucratic procedures (oh my, these two sentences were phrased so bureaucratically that I felt like standing in a queue).
The first such procedure is creating your own application. To do that, go to [this link](http://vk.com/editapp?act=create) and walk through the required steps:
<img align="center" src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/app_creation_1.png" width="500">
After confirming your identity by phone number, you land on the page of the freshly created application
<img align="center" src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/app_creation_2.png" width="500">
On the left there is a settings tab; opening it, we see all the parameters we need for working with the application:
<img align="center" src="https://raw.githubusercontent.com/hse-econ-data-science/eds_spring_2020/master/sem05_parsing/image/app_creation_3.png" width="500">
From here you can take the service access key as your token. For a subset of the API methods this is quite enough (such methods usually carry a corresponding note in their header). Sometimes additional permissions are needed. To obtain them, you have to perform a couple more odd manipulations:
Go to a URL of the following form (the asterisks should be replaced with the ID of the application you created):
> https://oauth.vk.com/authorize?client_id=**********&scope=8198&redirect_uri=https://oauth.vk.com/blank.html&display=page&v=5.16&response_type=token
As a result, this request produces a URL of the following form:
> https://oauth.vk.com/blank.html#access_token=25b636116ef40e0718fe4d9f382544fc28&expires_in=86400&user_id=*******
Первый набор знаков — `access token`, т.е. маркер доступа. Вторая цифра (`expires_in=`) время работы маркера доступа в секундах (одни сутки). По истечению суток нужно будет получить новый маркер доступа. Последняя цифра (`user_id=`) ваш ID Вконтакте. Нам в дальнейшем понадобится маркер доступа. Для удобства сохраним его в отдельном файле или экспортируем в глобальную область видимости. В целях безопасности ваших данных не стоит нигде светить токенами и тем более выкладывать их в открытый доступ. __Так можно и аккаунта случайно лишиться.__ Берегите токен смолоду.
Обратите внимание на ссылку, по которой мы делали запрос на предоставление токена. Внутри неё находится странный параметр `scope=8198.` Это мы просим доступ к конкретным разделам. Подробнее познакомиться с взаимно-однозначным соответствием между числами и правами можно [в документации.](https://vk.com/dev/permissions) Например, если мы хотим получить доступ к друзьям, фото и стенам, мы подставим в scope цифру 2+4++8192=8198.
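A tiny sketch of how that scope number is assembled (the permission values come from the table in the VK documentation; the helper name is ours):
```
# hypothetical helper: build the scope bitmask from named permissions
permissions = {'friends': 2, 'photos': 4, 'wall': 8192}

def vk_scope(*names):
    return sum(permissions[name] for name in names)

vk_scope('friends', 'photos', 'wall')   # 8198
```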
```
# мой номер странички
myid = '' # вставить номер странички
# версия используемого API
version = '5.103'
# подгружаем токен из файлика на компьютере
with open('secret_token.txt') as f:
token = f.read()
```
To download something from VK, you build a URL and fetch it with the `requests` package. The URL consists of a method (what we are asking VK for) and parameters (how much of it and in what form). We will simply swap these two pieces around to download different things.
```
method = 'users.get'
parameters = 'user_ids='
url = 'https://api.vk.com/method/' + method + '?' + parameters + '&v=' + version + '&access_token=' + token
response = requests.get(url)
response.json()
```
In response to our request VK returns a JSON with the information. JSON looks very much like Python dictionaries: square and curly brackets mean the same thing. There are differences, though. For example, in Python single and double quotes are interchangeable, while JSON only allows double quotes.
We can see that the JSON we received is a dictionary whose values are strings or numbers, as well as lists or dictionaries whose values can in turn also be strings, numbers, lists, dictionaries, and so on. The result is a fairly nested data structure from which we can pull out everything that interests us.
```
response.json()['response'][0]['first_name']
```
[The documentation](https://vk.com/dev/manuals) describes in detail which methods exist and which parameters they accept. Let's wrap the code above into a function and try to download something.
```
def vk_download(method, parameters):
url = 'https://api.vk.com/method/' + method + '?' + parameters + '&access_token=' + token + '&v=' + version
response = requests.get(url)
infa = response.json()
return infa
```
For example, all the likes from the [Higher School of Memes](https://vk.com/hsemem) public page.
```
group_id = '-51126445' # взяли из ссылки на группу
wall = vk_download('wall.get', 'owner_id={}&count=100'.format(group_id))
wall = wall['response']
wall['items'][0]
wall['items'][0]['likes']['count']
likes = [item['likes']['count'] for item in wall['items']]
likes[:10]
```
A single request downloaded a mere $100$ posts with their likes. The page actually has this many:
```
wall['count']
```
[The documentation](https://vk.com/dev/manuals) says there is an `offset` parameter that lets you specify exactly which posts to download. For example, with `offset = 100` you get the second hundred. All that remains is to write a loop.
```
import time
likes = [ ] # сюда буду сохранять лайки
for offset in range(0, 4800, 100):
time.sleep(0.4) # вк согласен работать 3 раза в секунду,
# между запросами python спит 0.4 секунды
wall = vk_download('wall.get', 'owner_id={}&count=100&offset={}'.format(group_id, offset))
likes.extend([item['likes']['count'] for item in wall['response']['items']])
```
The likes are in our hands. We can even look at their distribution and try to do something with them.
```
len(likes)
import matplotlib.pyplot as plt
plt.hist(likes);
```
In much the same way you can download almost anything. Note that VK has a special [`execute`](https://vk.com/dev/execute) method that can sometimes speed up the download by a factor of $25$. [This very old tutorial](https://github.com/DmitrySerg/OpenData/blob/master/RussianElections2018/Part_1_Parsing_VK.ipynb) even contains a usage example.
## 3.2 The Google Maps API
A maps API can be handy for all sorts of semi-geographic studies. Suppose we want to test the hypothesis that good coffee raises apartment prices, with the number of coffee shops in the neighbourhood as one of the regressors. That count has to come from somewhere: Google Maps to the rescue!
Once again everything starts with [getting a key.](https://developers.google.com/maps/documentation/directions/start) Here it is much simpler: follow the link, press Get started, agree to everything except the billing, get the access key and save it to a file next to the notebook.
```
# подгружаем токен
with open('google_token.txt') as f:
google_token = f.read()
```
We build the request URL following the [documentation](https://developers.google.com/maps/documentation) and get the answer back as JSON.
```
mainpage = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'
location = '55.86,37.54'
radius = '3000'
keyword = 'кофейня'
parameters = 'location='+location+'&radius='+radius+'&keyword='+keyword+'&language=ru-Ru'+'&key='+ google_token
itog_url = mainpage + parameters
itog_url
response = requests.get(itog_url)
response.json()['results'][0]
```
From the JSON we pull out the most interesting bits by the corresponding keys. For example, the names of the places:
```
[item['name'] for item in response.json()['results']]
```
[This old, unfinished guide](https://nbviewer.jupyter.org/github/FUlyankin/Parsers/blob/master/Parsers%20/Google_maps_API.ipynb) has a couple more examples of working with Google Maps.
# 4. Selenium
Selenium is a tool for driving a browser like a robot. For it to work correctly you need to download a driver: [for Chrome](https://sites.google.com/a/chromium.org/chromedriver/downloads) or [for Firefox.](https://github.com/mozilla/geckodriver/releases)
```
from selenium import webdriver
driver = webdriver.Firefox()
```
After the block above runs, another browser window opens. Let's go to Google's start page in it.
```
ref = 'http://google.com'
driver.get(ref)
```
Find the query input field in the HTML code and click on it.
```
stroka = driver.find_element_by_name("q")
stroka.click()
```
Type something into it.
```
stroka.send_keys('Вконтакте')
```
Find the search button and press it.
```
# находим кнопку для гугления и жмём её
button = driver.find_element_by_name('btnK')
button.click()
```
The page now shows the search results. Let's hand it over to bs4 and extract all the sites.
```
bs = BeautifulSoup(driver.page_source)
dirty_hrefs = bs.find_all('h3',attrs={'class':'r'})
clean_hrefs = [href.a['href'] for href in dirty_hrefs]
clean_hrefs
```
Close the browser.
```
driver.close()
```
Selenium was really designed for testers, not for scraping, and it is very slow, so for parsers it only makes sense as a last resort. If you really, really cannot fool the server through requests, or you run into some specific anti-bot protection, Selenium can help. It is also __important__ with Selenium not to forget time delays so the page has time to load, or to write proper code that waits for the page to load and only then clicks the buttons and so on.
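For example, instead of a fixed `time.sleep`, Selenium's explicit waits poll until an element shows up. A minimal sketch (the timeout and the element are illustrative):
```
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get('http://google.com')
# wait up to 10 seconds for the search box to appear before touching it
stroka = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.NAME, 'q')))
stroka.send_keys('Вконтакте')
driver.close()
```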
There is a [Russian translation of the documentation on Habr.](https://habr.com/ru/post/248559/)
In my practice it has been useful a couple of times:
* I had to download a lot of information about search queries from [Google Trends,](https://trends.google.ru/trends/?geo=RU) and the API was too restrictive.
* I had to use a search engine to find the tax ID (INN) of various organisations by their names (it only worked for large companies).
# 5. Tricks
### Trick 1: don't be shy about `try-except`
This construct lets Python do something else when an error occurs, or simply ignore it. For example, suppose we want the logarithm of every number in a list:
```
from math import log
a = [1,2,3,-1,-5,10,3]
for item in a:
print(log(item))
```
It does not work, because the logarithm of a negative number is undefined. To keep the code from crashing when the error occurs, we can change it slightly:
```
from math import log
a = [1,2,3,-1,-5,10,3]
for item in a:
try:
print(log(item)) # попробуй взять логарифм
except:
print('я не смог') # если не вышло, сознайся и работай дальше
```
__How is this used in parsing?__ The internet is made by people, and many people have very crooked hands. Suppose we left a parser downloading prices overnight; it ran for an hour and then crashed because on some page the tags were set incorrectly, or a rare field showed up, or there were artifacts from an old version of the site that our parser did not account for. It is much better for the code to ignore that single error and keep working, as in the sketch below.
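A sketch of the same idea applied to the book scraper from earlier (reusing the `get_page` function defined above):
```
prices = []
for p in range(1, 51):
    try:
        prices.extend([book['price'] for book in get_page(p)])  # may fail on a broken page
    except Exception as e:
        print('page', p, 'skipped:', e)  # log the problem and keep going
```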
### Trick 2: pd.read_html
If a table is hiding among the `<tr>` and `<td>` tags of the page you scraped, you can usually grab it without writing a loop over all the rows and columns. `pd.read_html` takes care of it. For example, this is how to grab [a table from the Central Bank's site](https://cbr.ru/currency_base/daily/):
```
import pandas as pd
df = pd.read_html('https://cbr.ru/currency_base/daily/', header=-1)[0]
df.head()
```
The command tries to collect every table on the web page into an array. If you like, you can first locate the right table with bs4 and only then parse it:
```
resp = requests.get('https://cbr.ru/currency_base/daily/')
tree = BeautifulSoup(resp.content, 'html.parser')
# нашли табличку
table = tree.find_all('table', {'class' : 'data'})[0]
# распарсили её
df = pd.read_html(str(table), header=-1)[0]
df.head()
```
### Trick 3: use the tqdm package
> The code has already been running for an hour. I have no idea when it will finish. It would be great to know how much longer to wait...
If this thought has crossed your mind, the `tqdm` package is your best friend. Install it with ```pip install tqdm```
```
from tqdm import tqdm_notebook
a = list(range(30))
# 30 раз будем спать по секунде
for i in tqdm_notebook(a):
time.sleep(1)
```
We wrapped the vector the loop iterates over in `tqdm_notebook`. This gives us a nice green bar that shows how far through the code we have got. Wrap your biggest and longest loops in `tqdm_notebook` and always know how much is left until the end.
### Trick 4: parallelisation
If the server is not too keen on banning you, you can parallelise your requests to it. The simplest way to do that is the `joblib` library.
```
from joblib import Parallel, delayed
from tqdm import tqdm_notebook
def simple_function(x):
return x**2
nj = -1 # паралель на все ядра
result = Parallel(n_jobs=nj)(
delayed(simple_function)(item) # какую функцию применяем
for item in tqdm_notebook(range(10))) # к каким объектам применям
# tqdm_notebook в последней строчке будет создавать зелёный бегунок с прогрессом
```
Actually, this is not the most efficient way to parallelise things in Python: it eats a lot of memory and runs slower than the [standard multiprocessing module.](https://docs.python.org/3/library/multiprocessing.html) But it is two lines of code. TWO LINES!
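For comparison, roughly the same thing with the standard library (a sketch, not a benchmark; on some platforms multiprocessing wants the function defined in an importable module rather than a notebook cell):
```
from multiprocessing import Pool

def simple_function(x):
    return x**2

if __name__ == '__main__':
    with Pool() as pool:                      # one worker per CPU core by default
        result = pool.map(simple_function, range(10))
    print(result)
```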
### Trick 5: selenium without a browser
Selenium can be configured so that no browser window physically opens.
```
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options)
ref = 'http://google.com'
driver.get(ref)
driver.close()
```
### More tricks:
* __Save what you scrape as you go!__ Put the code that saves the file right inside the loop (see the sketch after this list)!
* When the code crashes in the middle of the download list, you don't have to restart from scratch. Just save the chunk that has already been downloaded and relaunch the code from the point of failure.
* Putting the loop that walks the links inside a function is not a great idea. Suppose you have to visit $100$ links and the function should return the objects downloaded from all of them. It crashes on object $50$. Of course the function does not return what was already downloaded: everything you fetched is lost and you have to start over. Why? Because a function has its own namespace. If you had written a plain loop instead, you could save the first $50$ objects that are already sitting in the list and then continue the download.
* You can navigate an html page with `xpath`. It is designed for quickly locating elements inside an html page. [Read more about it here.](https://devhints.io/xpath)
* Don't be lazy about reading the documentation. It is full of useful things.
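A sketch of the save-as-you-go idea from the first bullet, again reusing `get_page` from above (the file name is made up):
```
import csv

with open('books.csv', 'a', newline='') as f:
    writer = csv.writer(f)
    for p in range(1, 51):
        for book in get_page(p):               # download one page
            writer.writerow([book['title'], book['price'], book['href']])
        f.flush()                              # a crash now loses at most one page
```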
# 6. Further reading
* [Scraping memes in Python](https://habr.com/ru/company/ods/blog/346632/) is a detailed Habr article from which you can learn ... to scrape (SURPRISE)
* [Ilya Schurov's notebooks](https://github.com/ischurov/pythonhse) on Python for data analysis. [Lecture 9](https://nbviewer.jupyter.org/github/ischurov/pythonhse/blob/master/Lecture%209.ipynb) and [lecture 10](https://nbviewer.jupyter.org/github/ischurov/pythonhse/blob/master/Lecture%2010.ipynb) cover parsers.
* [A book on web scraping](https://github.com/FUlyankin/Parsers/blob/master/Ryan_Mitchell_Web_Scraping_with_Python-_Collecting_Data_from_the_Modern_Web_2015.pdf) in case you are really bored and want to read something long and in English
* [Advanced usage of requests](https://2.python-requests.org/en/master/user/advanced/)
* [Russian translation of the Selenium documentation on Habr](https://habr.com/ru/post/248559/)
* [A page of parsers by one of the seminar instructors;](https://fulyankin.github.io/Parsers/) much of it is unfinished and rough, but there are interesting bits:
  * [More detail on Selenium](https://nbviewer.jupyter.org/github/FUlyankin/Parsers/blob/master/sems/3_Selenium_and_Tor/4.1%20Selenium%20.ipynb)
  * [A slightly outdated guide to parsing VK](https://nbviewer.jupyter.org/github/FUlyankin/ekanam_grand_research/blob/master/0.%20vk_parser_tutorial.ipynb)
  * [A slightly outdated guide to the Google Maps API](https://nbviewer.jupyter.org/github/FUlyankin/Parsers/blob/master/Parsers%20/Google_maps_API.ipynb)
|
github_jupyter
|
<html>
<head> Заголовок </head>
<body>
<div>
Первый кусок текста со своими свойствами
</div>
<div>
Второй кусок текста
<b>
Третий, жирный кусок
</b>
</div>
Четвёртый кусок текста
</body>
</html>
import requests
url = 'http://books.toscrape.com/catalogue/page-1.html'
response = requests.get(url)
response
requests.get('http://books.toscrape.com/big_scholarship')
response.content[:1000]
from bs4 import BeautifulSoup
# распарсили страничку в дерево
tree = BeautifulSoup(response.content, 'html.parser')
tree.html.head.title
tree.html.head.title.text
tree.html.head.title.text.strip()
books = tree.find_all('article', {'class' : 'product_pod'})
books[0]
type(books[0])
books[0].find('p', {'class': 'price_color'}).text
books[0].h3
books[0].h3.a.get('href')
books[0].h3.a.get('title')
tree.find_all('a', {'title': 'A Light in the Attic'})
def get_page(p):
# изготовили ссылку
url = 'http://books.toscrape.com/catalogue/page-{}.html'.format(p)
# сходили по ней
response = requests.get(url)
# построили дерево
tree = BeautifulSoup(response.content, 'html.parser')
# нашли в нём всё самое интересное
books = tree.find_all('article', {'class' : 'product_pod'})
infa = [ ]
for book in books:
infa.append({'price': book.find('p', {'class': 'price_color'}).text,
'href': book.h3.a.get('href'),
'title': book.h3.a.get('title')})
return infa
infa = []
for p in range(1,51):
infa.extend(get_page(p))
import pandas as pd
df = pd.DataFrame(infa)
print(df.shape)
df.head()
import time
time.sleep(3) # и пусть весь мир подождёт 3 секунды
from fake_useragent import UserAgent
UserAgent().chrome
url = 'https://knowyourmeme.com/'
response = requests.get(url)
response
response = requests.get(url, headers={'User-Agent': UserAgent().chrome})
response
r = requests.get('https://httpbin.org/ip')
print(r.json())
proxies = {
'http': '182.53.206.47:47592',
'https': '182.53.206.47:47592'
}
r = requests.get('https://httpbin.org/ip', proxies=proxies)
print(r.json())
import requests
requests.get('http://www.google.com', timeout=1)
# мой номер странички
myid = '' # вставить номер странички
# версия используемого API
version = '5.103'
# подгружаем токен из файлика на компьютере
with open('secret_token.txt') as f:
token = f.read()
method = 'users.get'
parameters = 'user_ids='
url = 'https://api.vk.com/method/' + method + '?' + parameters + '&v=' + version + '&access_token=' + token
response = requests.get(url)
response.json()
response.json()['response'][0]['first_name']
def vk_download(method, parameters):
url = 'https://api.vk.com/method/' + method + '?' + parameters + '&access_token=' + token + '&v=' + version
response = requests.get(url)
infa = response.json()
return infa
group_id = '-51126445' # взяли из ссылки на группу
wall = vk_download('wall.get', 'owner_id={}&count=100'.format(group_id))
wall = wall['response']
wall['items'][0]
wall['items'][0]['likes']['count']
likes = [item['likes']['count'] for item in wall['items']]
likes[:10]
wall['count']
import time
likes = [ ] # сюда буду сохранять лайки
for offset in range(0, 4800, 100):
time.sleep(0.4) # вк согласен работать 3 раза в секунду,
# между запросами python спит 0.4 секунды
wall = vk_download('wall.get', 'owner_id={}&count=100&offset={}'.format(group_id, offset))
likes.extend([item['likes']['count'] for item in wall['response']['items']])
len(likes)
import matplotlib.pyplot as plt
plt.hist(likes);
# подгружаем токен
with open('google_token.txt') as f:
google_token = f.read()
mainpage = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'
location = '55.86,37.54'
radius = '3000'
keyword = 'кофейня'
parameters = 'location='+location+'&radius='+radius+'&keyword='+keyword+'&language=ru-Ru'+'&key='+ google_token
itog_url = mainpage + parameters
itog_url
response = requests.get(itog_url)
response.json()['results'][0]
[item['name'] for item in response.json()['results']]
from selenium import webdriver
driver = webdriver.Firefox()
ref = 'http://google.com'
driver.get(ref)
stroka = driver.find_element_by_name("q")
stroka.click()
stroka.send_keys('Вконтакте')
# находим кнопку для гугления и жмём её
button = driver.find_element_by_name('btnK')
button.click()
bs = BeautifulSoup(driver.page_source)
dirty_hrefs = bs.find_all('h3',attrs={'class':'r'})
clean_hrefs = [href.a['href'] for href in dirty_hrefs]
clean_hrefs
driver.close()
from math import log
a = [1,2,3,-1,-5,10,3]
for item in a:
print(log(item))
from math import log
a = [1,2,3,-1,-5,10,3]
for item in a:
try:
print(log(item)) # попробуй взять логарифм
except:
print('я не смог') # если не вышло, сознайся и работай дальше
import pandas as pd
df = pd.read_html('https://cbr.ru/currency_base/daily/', header=-1)[0]
df.head()
resp = requests.get('https://cbr.ru/currency_base/daily/')
tree = BeautifulSoup(resp.content, 'html.parser')
# нашли табличку
table = tree.find_all('table', {'class' : 'data'})[0]
# распарсили её
df = pd.read_html(str(table), header=-1)[0]
df.head()
Мы обмотали тот вектор, по которому идёт цикл в `tqdm_notebook`. Это даёт нам красивую зелёную строку, которая показывает насколько сильно мы продвинулись по коду. Обматывайте свои самые большие и долгие циклы в `tqdm_notebook` и всегда понимайте сколько осталось до конца.
### Хитрость 4: распаралеливание
Если сервер не очень настроен вас банить, можно распаралелить свои запросы к нему. Самый простой способ сделать это — библиотека `joblib`.
На самом деле это не самый эффективный способ паралелить в python. Он жрёт много памяти и работает медленнее, чем [стандартный multiprocessing.](https://docs.python.org/3/library/multiprocessing.html) Но зато две строчки, КАРЛ! Две строчки!
### Хитрость 5: selenium без браузера
Селениум можно настроить так, чтобы физически браузер не открывался.
| 0.183703 | 0.946794 |
# Dictionaries
A dictionary is a built-in Python type that resembles a list, but is more powerful. In a list the indices are integers; in a dictionary the indices can be of any immutable type (integer, number, string, tuple, ...).

The `dict` function creates an empty dictionary. We could also create one with a pair of curly braces `{}`.
```
d = dict()
d
```
## Associative memory
A dictionary associates a **key** with a **value**. This structure is also called
- an associative memory
- a hash table
This data type is standard in recent languages (JavaScript, Python, Ruby) but absent from older ones (C, Fortran).
To add an element to a dictionary, we use an expression of the form `d[key] = value`. As with a 'real' dictionary that links words of two languages, we can define this:
```
d['un'] = 'one'
d['deux'] = 'two'
d
```
A dictionary is displayed as follows:
- it is delimited by curly braces (`{}`)
- its elements are separated by commas (`,`)
- the **key:value** pairs are separated by a colon (`:`)
To access a value, we use an index between square brackets (`[]`), as in a list.
```
d['deux']
```
New elements can be added in any order.
```
d['dix'] = 'ten'
d
```
The `len` function gives us the size of the dictionary.
```
len(d)
```
The `in` keyword tests whether an element is part of the dictionary.
```
'deux' in d
```
Careful: the test is done on the keys, not on the values.
```
'two' in d
```
## Counting (histogram)
A dictionary is an ideal structure for counting elements that belong to different categories. For example, if we want to count how often each letter appears in a text, we can use a dictionary.
```
phrase = 'dictionnaire'
d = {}
for c in phrase:
if c in d:
d[c] += 1
else:
d[c] = 1
d
```
This structure is also called a **histogram**. It shows us that
- the letter **d** appears once,
- the letter **i** appears three times.
The `get` method returns a default value if the key does not yet exist in the dictionary. For example, the letter **b** is not a key of the dictionary, so the method returns its default value, which is 0.
```
d.get('b', 0)
```
This lets us shorten the histogram program even further.
```
d = {}
for c in phrase:
d[c] = d.get(c, 0) + 1
d
```
## Iterating over the keys
We can iterate over the keys of a dictionary.
```
for c in d:
print(c, d[c])
```
Converting a dictionary to a list returns a list of its keys.
```
list(d)
```
The `sorted` function returns a sorted list of its keys.
```
sorted(d)
```
## List and set of the values
The `values` method returns all the values.
```
d.values()
```
We can turn them into an ordinary list.
```
list(d.values())
```
We can also turn them into a **set** and eliminate the duplicates.
```
set(d.values())
```
## Inverting a dictionary
All the keys of a dictionary are unique. On the other hand, it is entirely possible to have several identical values. If we invert a dictionary, one value may correspond to several keys, which we can represent as a list.
```
inverse = {}
for c in d:
val = d[c]
if val in inverse:
inverse[val].append(c)
else:
inverse[val] = [c]
inverse
```
## Turning a table into a dictionary
```
train1 = ['S22', '8h47', 'Vufflens-la-Ville', 1]
train2 = ['S2', '8h51','Aigle', 4]
train3 = ['S4', '8h55', 'Palézieux', 3]
horaire = [train1, train2, train3]
horaire
horaire_dict = {}
n = len(horaire)
for i in range(n):
horaire_dict[i] = horaire[i]
horaire_dict
horaire_dict[1][2]
```
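As an aside, the same table-to-dictionary conversion can be written more compactly with `enumerate`; a small sketch:
```
horaire_dict = dict(enumerate(horaire))   # {0: train1, 1: train2, 2: train3}
horaire_dict[1][2]                        # 'Aigle'
```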
|
github_jupyter
|
d = dict()
d
d['un'] = 'one'
d['deux'] = 'two'
d
d['deux']
d['dix'] = 'ten'
d
len(d)
'deux' in d
'two' in d
phrase = 'dictionnaire'
d = {}
for c in phrase:
if c in d:
d[c] += 1
else:
d[c] = 1
d
d.get('b', 0)
d = {}
for c in phrase:
d[c] = d.get(c, 0) + 1
d
for c in d:
print(c, d[c])
list(d)
sorted(d)
d.values()
list(d.values())
set(d.values())
inverse = {}
for c in d:
val = d[c]
if val in inverse:
inverse[val].append(c)
else:
inverse[val] = [c]
inverse
train1 = ['S22', '8h47', 'Vufflens-la-Ville', 1]
train2 = ['S2', '8h51','Aigle', 4]
train3 = ['S4', '8h55', 'Palézieux', 3]
horaire = [train1, train2, train3]
horaire
horaire_dict = {}
n = len(horaire)
for i in range(n):
horaire_dict[i] = horaire[i]
horaire_dict
horaire_dict[1][2]
| 0.105452 | 0.985718 |
# Operations on VR Objects
The VR Specification describes operations on variants that should be supported by implementations. This notebook demonstrates the following functions:
* `normalize`: Implements sequence normalization for insertion and deletion variation
* `sha512t24u`: Implements a convention constructing and formatting digests for an object
* `ga4gh_digest`: Generates a digest for a GA4GH object
* `ga4gh_serialize`: Serializes a GA4GH object using a canonical binary form
* `ga4gh_identify`: Generates a CURIE identifier for a GA4GH object
<img src="images/id-dig-ser.png" width="75%" alt="Operations Overview"/>
**Note:** Most implementation users will need only the `ga4gh_identify` function.
We describe the `ga4gh_serialize`, `ga4gh_digest`, and `sha512t24u` functions here for completeness.
<div class="alert alert-warning">
These operations require access to external data to translate sequence identifiers.
See the vr-python README for installation options.
</div>
## Load data saved by Schema notebook
Loads the allele json and rehydrates an Allele object
```
import json
from ga4gh.vrs import models
data = json.load(open("objects.json"))
allele = models.Variation(**data["alleles"][0])
print(allele)
print(allele.as_dict())
```
## External Sequence Data
In order to support the full functionality of VR, implementations require access to all sequences and sequence identifiers that are used as variation reference sequences. For the purposes of this notebook, data are mocked as static responses.
The VR specification leaves the choice of those data sources to the implementations. In vr-python, `ga4gh.vrs.dataproxy` provides an abstract base class as a basis for data source adapters. One source is [SeqRepo](https://github.com/biocommons/biocommons.seqrepo/), which is used below. (An adapter based on the GA4GH refget specification exists, but is pending necessary changes to the refget interface to provide accession-based lookups.)
SeqRepo: [github](https://github.com/biocommons/biocommons.seqrepo/) | [data snapshots](http://dl.biocommons.org/seqrepo/) | [seqrepo-rest-service @ github](https://github.com/biocommons/seqrepo-rest-service) | [seqrepo-rest-service docker images](https://cloud.docker.com/u/biocommons/repository/docker/biocommons/seqrepo-rest-service)
RefGet: [spec](https://samtools.github.io/hts-specs/refget.html) | [perl server](https://github.com/andrewyatz/refget-server-perl)
```
from ga4gh.core import sha512t24u
from ga4gh.core import ga4gh_digest, ga4gh_identify, ga4gh_serialize
from ga4gh.vrs import __version__, models
from ga4gh.vrs.dataproxy import SeqRepoRESTDataProxy
# Requires seqrepo REST interface is running on this URL (e.g., using docker image)
seqrepo_rest_service_url = "http://localhost:5000/seqrepo"
dp = SeqRepoRESTDataProxy(base_url=seqrepo_rest_service_url)
dp.translate_sequence_identifier("refseq:NC_000019.10", "ga4gh")
dp.get_sequence("ga4gh:SQ.IIB53T8CNeJJdUqzn9V_JnRtQadwWCbl", start=44908821-25, end=44908822+25)
```
## normalize()
The VR Spec REQUIRES that variation is reported as "expanded" alleles. Expanded alleles capture the entire region of insertion/deletion ambiguity, thereby facilitating comparisons that would otherwise require on-the-fly computations. Note: this example uses the bioutils normalize method rather than the one from ga4gh.vrs, since the latter does not support the shuffling modes shown below.
```
# Define a dinucleotide insertion on the following sequence at interbase (13, 13)
sequence = "CCCCCCCCACACACACACTAGCAGCAGCA"
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9
# C C C C C C C C A C A C A C A C A C T A G C A G C A G C A
# ^ insert CA here
interval = (13, 13)
alleles = (None, "CA")
args = dict(sequence=sequence, interval=interval, alleles=alleles, bounds=(0,len(sequence)))
import bioutils
# The expanded allele sequences. This is a concept that is valid in HGVS space.
bioutils.normalize.normalize(**args, mode="EXPAND")
# For comparison, the left and right shuffled alleles
bioutils.normalize.normalize(**args, mode="LEFTSHUFFLE")
bioutils.normalize.normalize(**args, mode="RIGHTSHUFFLE")
# In contrast in the VR spec we provide fully justified representations:
from ga4gh.vrs import normalize
normalize()
```
### sha512t24u() — Truncated SHA-512 digest
The `sha512t24u` function implements a convention for constructing unique identifiers from binary objects (e.g., from serialization) using well-known SHA-512 hashing and Base64 (i.e., base64url) encoding.
```
sha512t24u(b"")
sha512t24u(b"ACGT")
```
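As an illustration only, the convention can be reproduced with the standard library: SHA-512, truncated to 24 bytes, then base64url-encoded. This sketch assumes that matches the packaged implementation; when in doubt, trust `sha512t24u` itself.
```
import base64, hashlib

def sha512t24u_sketch(blob: bytes) -> str:
    digest = hashlib.sha512(blob).digest()[:24]           # truncate to 24 bytes
    return base64.urlsafe_b64encode(digest).decode("ascii")

sha512t24u_sketch(b"ACGT")  # expected to agree with sha512t24u(b"ACGT") above
```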
## Computing Identifiers for objects
### ga4gh_serialize()
Serialization is the process of converting an object to a *binary* representation for transmission or communication. In the context of generating GA4GH identifiers, serialization is a process to generate a *canonical* JSON form in order to generate a digest. The VR serialization is based on a JSON canonicalization scheme consistent with several existing proposals. See the spec for details.
Because the serialization and digest methods are well-defined, groups with the same data will generate the same digests and computed identifiers.
GA4GH serialization replaces inline identifiable objects with their digests in order to create a well-defined ordering. See the `location` property in the `Allele` example below.
<br>
<div>
<div style="border-radius: 10px; width: 80%; margin: 0 auto; padding: 5px; border: 2pt solid #660000; color: #660000; background: #f4cccc;">
<span style="font-size: 200%">⚠</span> Although JSON serialization and GA4GH canonical JSON serialization appear similar, they are NOT interchangeable and will generated different digests. GA4GH identifiers are defined <i>only</i> when used with GA4GH serialization process.
</div>
</div>
```
# This is the "simple" allele defined above, repeated here for readability
# Note that the location data is inlined
allele.as_dict()
# This is the serialized form. Notice that the inline `Location` instance was replaced with
# its identifier and that the Allele id is not included.
ga4gh_serialize(allele)
```
## ga4gh_digest()
ga4gh_digest() returns the sha512t24u digest of a ga4gh_serialize'd object. The digest is cached within the object itself to minimize recomputation.
```
ga4gh_digest(allele)
sha512t24u(ga4gh_serialize(allele))
```
### ga4gh_identify()
VR computed identifiers are constructed from digests on serialized objects by prefixing a VR digest with a type-specific code.
```
# identify() uses this digest to construct a CURIE-formatted identifier.
# The VA prefix identifies this object as a Variation Allele.
ga4gh_identify(allele)
```
|
github_jupyter
|
import json
from ga4gh.vrs import models
data = json.load(open("objects.json"))
allele = models.Variation(**data["alleles"][0])
print(allele)
print(allele.as_dict())
from ga4gh.core import sha512t24u
from ga4gh.core import ga4gh_digest, ga4gh_identify, ga4gh_serialize
from ga4gh.vrs import __version__, models
from ga4gh.vrs.dataproxy import SeqRepoRESTDataProxy
# Requires seqrepo REST interface is running on this URL (e.g., using docker image)
seqrepo_rest_service_url = "http://localhost:5000/seqrepo"
dp = SeqRepoRESTDataProxy(base_url=seqrepo_rest_service_url)
dp.translate_sequence_identifier("refseq:NC_000019.10", "ga4gh")
dp.get_sequence("ga4gh:SQ.IIB53T8CNeJJdUqzn9V_JnRtQadwWCbl", start=44908821-25, end=44908822+25)
# Define a dinucleotide insertion on the following sequence at interbase (13, 13)
sequence = "CCCCCCCCACACACACACTAGCAGCAGCA"
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9
# C C C C C C C C A C A C A C A C A C T A G C A G C A G C A
# ^ insert CA here
interval = (13, 13)
alleles = (None, "CA")
args = dict(sequence=sequence, interval=interval, alleles=alleles, bounds=(0,len(sequence)))
import bioutils
# The expanded allele sequences. This is a concept that is valid in HGVS space.
bioutils.normalize.normalize(**args, mode="EXPAND")
# For comparison, the left and right shuffled alleles
bioutils.normalize.normalize(**args, mode="LEFTSHUFFLE")
bioutils.normalize.normalize(**args, mode="RIGHTSHUFFLE")
# In contrast in the VR spec we provide fully justified representations:
from ga4gh.vrs import normalize
normalize()
sha512t24u(b"")
sha512t24u(b"ACGT")
# This is the "simple" allele defined above, repeated here for readability
# Note that the location data is inlined
allele.as_dict()
# This is the serialized form. Notice that the inline `Location` instance was replaced with
# its identifier and that the Allele id is not included.
ga4gh_serialize(allele)
ga4gh_digest(allele)
sha512t24u(ga4gh_serialize(allele))
# identify() uses this digest to construct a CURIE-formatted identifier.
# The VA prefix identifies this object as a Variation Allele.
ga4gh_identify(allele)
| 0.517083 | 0.950641 |
```
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('pink', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('pink', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
```
|
github_jupyter
|
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('pink', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('pink', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| 0.764452 | 0.837753 |
# CMPSC 100: `Week 06` Sandbox
---
In the computer science world, a "sandbox" is almost exactly what it is in the real world: a place to, effectively, "play."
## Usage
First and foremost, this space is for *you*.
Use this notebook to try out code presented in class or do self-guided exploration. Your instructor may reference this notebook and ask that you write some code in it to follow along with in-class exercises or live programming.
I also encourage you to use this as a space to take notes as you participate in discussions or practice coding concepts on your own.
### Reminders
The following key combinations and shortcuts will speed your work in this notebook.
#### With cursor in cell
| Key(s) | Outcome |
|:-|:-|
| `Enter` | Adds a new line in the current cell |
| `Shift` + `Enter` | Runs the current cell and moves to the first cell below |
| `Ctrl` + `Enter` | Runs the current cell; cursor remains in cell |
| `Alt` + `Enter` | Runs current cell and inserts new cell below ||
#### With cell selected
| Key(s) | Outcome |
|:-|:-|
| `A` | Add cell above current cell |
| `B` | Add cell below current cell |
| `D` | Delete current cell |
| `Shift` + `M` | Merge current cell with below |
| `M` | Changes current cell to Markdown format|
| `Y` | Changes current cell to code format |
## Helpful readings
The readings below are meant to serve as references for you as you explore practical use of our course's platforms. As always, however, feel free to reach out to your instructors or post questions in our course [Slack](cmpsc-100-00-fa-2020.slack.com).
### Using Jupyter notebooks
For questions about using a Jupyter Notebook, our colleagues at Bryn Mawr created a [Jupyter Notebook User's Manual](https://jupyter.brynmawr.edu/services/public/dblank/Jupyter%20Notebook%20Users%20Manual.ipynb) which answers basic and advanced questions about the platform.
### Markdown
I strongly recommend reading the [Mastering Markdown](https://guides.github.com/features/mastering-markdown/) guide to serve as a Markdown reference.
## Sandbox
```
message = "Sally sells sea shells by the sea shore."
# any time we convert characters to ASCII code points (rly numbers) we are getting the "ordinal" value
print(ord("S"))
print(ord("s"))
# any time we do the reverse, we use "chr"
print(chr(83))
print(chr(115))
print(len(message)) # <-- we have a known, fixed length
print(message[10:30])
# methods?
print(message.upper())
print(message.lower())
print(message.count("s"))
print(message.count("S"))
# string are immutable
print(message.replace("Sally","Martha"))
print(message)
message = message.upper()
print(message)
# f-strings
name = input("What's yer name?")
print(f"Oh, hello {name}.")
poem = open("frost_the_road_not_taken.txt","r")
text = poem.read()
text
print(text)
poem = open("frost_the_road_not_taken.txt","r")
text = poem.readlines()
print(text)
for line in text:
print(line)
# TODO: Read lines
# TODO: Open the file
open_message = open("../lab/coded_message.txt", "r")
read_message = open_message.read()
print(read_message)
# TODO: Iterate over the file's contents
letters = []
set_numbers = read_message.split()
#print(set_numbers)
for numbers in set_numbers:
all_numbers = int(numbers)
coded_letters = chr(all_numbers)
letters.append(coded_letters)
#print(chr(all_numbers))
"".join(letters)
print(letters)
```
|
github_jupyter
|
message = "Sally sells sea shells by the sea shore."
# any time we convert characters to ASCII code points (rly numbers) we are getting the "ordinal" value
print(ord("S"))
print(ord("s"))
# any time we do the reverse, we use "chr"
print(chr(83))
print(chr(115))
print(len(message)) # <-- we have a known, fixed length
print(message[10:30])
# methods?
print(message.upper())
print(message.lower())
print(message.count("s"))
print(message.count("S"))
# string are immutable
print(message.replace("Sally","Martha"))
print(message)
message = message.upper()
print(message)
# f-strings
name = input("What's yer name?")
print(f"Oh, hello {name}.")
poem = open("frost_the_road_not_taken.txt","r")
text = poem.read()
text
print(text)
poem = open("frost_the_road_not_taken.txt","r")
text = poem.readlines()
print(text)
for line in text:
print(line)
# TODO: Read lines
# TODO: Open the file
open_message = open("../lab/coded_message.txt", "r")
read_message = open_message.read()
print(read_message)
# TODO: Iterate over the file's contents
letters = []
set_numbers = read_message.split()
#print(set_numbers)
for numbers in set_numbers:
all_numbers = int(numbers)
coded_letters = chr(all_numbers)
letters.append(coded_letters)
#print(chr(all_numbers))
"".join(letters)
print(letters)
| 0.163145 | 0.893263 |
<table>
<tr>
<td>
<img src='./text_images/nvidia.png' width="200" height="450">
</td>
<td> & </td>
<td>
<img src='./text_images/udacity.png' width="350" height="450">
</td>
</tr>
</table>
# Deep Reinforcement Learning for Optimal Execution of Portfolio Transactions
# Introduction
This notebook demonstrates how to use Deep Reinforcement Learning (DRL) for optimizing the execution of large portfolio transactions. We begin with a brief review of reinforcement learning and actor-critic methods. Then, you will use an actor-critic method to generate optimal trading strategies that maximize profit when liquidating a block of shares.
# Actor-Critic Methods
In reinforcement learning, an agent makes observations and takes actions within an environment, and in return it receives rewards. Its objective is to learn to act in a way that will maximize its expected long-term rewards.
<br>
<figure>
<img src = "./text_images/RL.png" width = 80% style = "border: thin silver solid; padding: 10px">
<figcaption style = "text-align: center; font-style: italic">Fig 1. - Reinforcement Learning.</figcaption>
</figure>
<br>
There are several types of RL algorithms, and they can be divided into three groups:
- **Critic-Only**: Critic-Only methods, also known as Value-Based methods, first find the optimal value function and then derive an optimal policy from it.
- **Actor-Only**: Actor-Only methods, also known as Policy-Based methods, search directly for the optimal policy in policy space. This is typically done by using a parameterized family of policies over which optimization procedures can be used directly.
- **Actor-Critic**: Actor-Critic methods combine the advantages of actor-only and critic-only methods. In this method, the critic learns the value function and uses it to determine how the actor's policy parameters should be changed. In this case, the actor brings the advantage of computing continuous actions without the need for optimization procedures on a value function, while the critic supplies the actor with knowledge of the performance. Actor-critic methods usually have good convergence properties, in contrast to critic-only methods. The **Deep Deterministic Policy Gradients (DDPG)** algorithm is one example of an actor-critic method.
<br>
<figure>
<img src = "./text_images/Actor-Critic.png" width = 80% style = "border: thin silver solid; padding: 10px">
<figcaption style = "text-align: center; font-style: italic">Fig 2. - Actor-Critic Reinforcement Learning.</figcaption>
</figure>
<br>
In this notebook, we will use DDPG to determine the optimal execution of portfolio transactions. In other words, we will use the DDPG algorithm to solve the optimal liquidation problem. But before we can apply the DDPG algorithm we first need to formulate the optimal liquidation problem so that in can be solved using reinforcement learning. In the next section we will see how to do this.
# Modeling Optimal Execution as a Reinforcement Learning Problem
As we learned in the previous lessons, the optimal liquidation problem is a minimization problem, *i.e.* we need to find the trading list that minimizes the implementation shortfall. In order to solve this problem through reinforcement learning, we need to restate the optimal liquidation problem in terms of **States**, **Actions**, and **Rewards**. Let's start by defining our States.
### States
The optimal liquidation problem entails that we sell all our shares within a given time frame. Therefore, our state vector must contain some information about the time remaining or, equivalently, the number of trades remaining. We will use the latter and use the following features to define the state vector at time $t_k$:
$$
[r_{k-5},\, r_{k-4},\, r_{k-3},\, r_{k-2},\, r_{k-1},\, r_{k},\, m_{k},\, i_{k}]
$$
where:
- $r_{k} = \log\left(\frac{\tilde{S}_k}{\tilde{S}_{k-1}}\right)$ is the log-return at time $t_k$
- $m_{k} = \frac{N_k}{N}$ is the number of trades remaining at time $t_k$ normalized by the total number of trades.
- $i_{k} = \frac{x_k}{X}$ is the remaining number of shares at time $t_k$ normalized by the total number of shares.
The log-returns capture information about stock prices before time $t_k$, which can be used to detect possible price trends. The number of trades and shares remaining allow the agent to learn to sell all the shares within a given time frame. It is important to note that in real world trading scenarios, this state vector can hold many more variables.
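Purely as an illustration (the numbers below are made up and this is not the environment's API), the state vector can be assembled like this:
```
import numpy as np

log_returns = np.zeros(6)            # r_{k-5} ... r_k
m_k = 30 / 60                        # trades remaining / total trades
i_k = 500_000 / 1_000_000            # shares remaining / total shares
state = np.concatenate([log_returns, [m_k, i_k]])   # length-8 state vector
```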
### Actions
Since the optimal liquidation problem only requires us to sell stocks, it is reasonable to define the action $a_k$ to be the number of shares to sell at time $t_{k}$. However, if we start with millions of stocks, interpreting the action directly as the number of shares to sell at each time step can lead to convergence problems, because the agent will need to produce actions with very high values. Instead, we will interpret the action $a_k$ as a **percentage**. In this case, the actions produced by the agent will only need to be between 0 and 1. Using this interpretation, we can determine the number of shares to sell at each time step using:
$$
n_k = a_k \times x_k
$$
where $x_k$ is the number of shares remaining at time $t_k$.
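For example, with one million shares remaining, an action of 0.05 corresponds to selling 50,000 shares (a toy calculation):
```
x_k = 1_000_000        # shares remaining
a_k = 0.05             # action produced by the agent
n_k = int(a_k * x_k)   # 50,000 shares to sell at this step
```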
### Rewards
Defining the rewards is trickier than defining states and actions, since the original problem is a minimization problem. One option is to use the difference between two consecutive utility functions. Remember that the utility function is given by:
$$
U(x) = E(x) + λ V(x)
$$
After each time step, we compute the utility using the equations for $E(x)$ and $V(x)$ from the Almgren and Chriss model for the remaining time and inventory while holding parameter λ constant. Denoting the optimal trading trajectory computed at time $t$ as $x^*_t$, we define the reward as:
$$
R_{t} = {{U_t(x^*_t) - U_{t+1}(x^*_{t+1})}\over{U_t(x^*_t)}}
$$
where we have normalized the difference to make the actor-critic model easier to train.
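In code the reward is just this normalized difference; a sketch with illustrative names:
```
def reward(U_t, U_t_next):
    """Normalized drop in the Almgren-Chriss utility between two consecutive steps."""
    return (U_t - U_t_next) / U_t
```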
# Simulation Environment
In order to train our DDPG algorithm we will use a very simple simulated trading environment. This environment simulates stock prices that follow a discrete arithmetic random walk, and assumes that the permanent and temporary market impact functions are linear functions of the rate of trading, just like in the Almgren and Chriss model. This simple trading environment serves as a starting point to create more complex trading environments. You are encouraged to extend this simple trading environment by adding more complexity to simulate real world trading dynamics, such as order books, network latencies, trading fees, etc...
The simulated environment is contained in the **syntheticChrissAlmgren.py** module. You are encouraged to take a look at it and modify its parameters as you wish. Let's take a look at the default parameters of our simulation environment. We have set the initial stock price to be $S_0 = 50$, and the total number of shares to sell to one million. This gives an initial portfolio value of $\$50$ million. We have also set the trader's risk aversion to $\lambda = 10^{-6}$.
The stock price will have 12\% annual volatility, a [bid-ask spread](https://www.investopedia.com/terms/b/bid-askspread.asp) of 1/8 and an average daily trading volume of 5 million shares. Assuming there are 250 trading days in a year, this gives a daily volatility in stock price of $0.12 / \sqrt{250} \approx 0.8\%$. We will use a liquidation time of $T = 60$ days and we will set the number of trades $N = 60$. This means that $\tau=\frac{T}{N} = 1$, which means we will be making one trade per day.
For the temporary cost function we will set the fixed cost of selling to be 1/2 of the bid-ask spread, $\epsilon = 1/16$. We will set $\eta$ such that for each one percent of the daily volume we trade, we incur a price impact equal to the bid-ask
spread. For example, trading at a rate of $5\%$ of the daily trading volume incurs a one-time cost on each trade of 5/8. Under this assumption we have $\eta =(1/8)/(0.01 \times 5 \times 10^6) = 2.5 \times 10^{-6}$.
For the permanent costs, a common rule of thumb is that price effects become significant when we sell $10\%$ of the daily volume. If we suppose that significant means that the price depression is one bid-ask spread, and that the effect is linear for smaller and larger trading rates, then we have $\gamma = (1/8)/(0.1 \times 5 \times 10^6) = 2.5 \times 10^{-7}$.
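These back-of-the-envelope numbers are easy to reproduce (a quick check, not part of the environment):
```
daily_volume = 5e6
spread = 1 / 8
epsilon = spread / 2                        # 0.0625 = 1/16
eta = spread / (0.01 * daily_volume)        # 2.5e-06
gamma = spread / (0.10 * daily_volume)      # 2.5e-07
daily_volatility = 0.12 / 250 ** 0.5        # ~0.0076, i.e. about 0.8%
```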
The tables below summarize the default parameters of the simulation environment
```
import utils
# Get the default financial and AC Model parameters
financial_params, ac_params = utils.get_env_param()
financial_params
ac_params
```
# Reinforcement Learning
In the code below we use DDPG to find a policy that can generate optimal trading trajectories that minimize implementation shortfall, and can be benchmarked against the Almgren and Chriss model. We will implement a typical reinforcement learning workflow to train the actor and critic using the simulation environment. We feed the states observed from our simulator to an agent. The Agent first predicts an action using the actor model and performs the action in the environment. Then, environment returns the reward and new state. This process continues for the given number of episodes. To get accurate results, you should run the code at least 10,000 episodes.
```
import numpy as np
import syntheticChrissAlmgren as sca
from ddpg_agent import Agent
from collections import deque
# Create simulation environment
env = sca.MarketEnvironment()
# Initialize Feed-forward DNNs for Actor and Critic models.
agent = Agent(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), random_seed=0)
# Set the liquidation time
lqt = 60
# Set the number of trades
n_trades = 60
# Set trader's risk aversion
tr = 1e-6
# Set the number of episodes to run the simulation
episodes = 3000
shortfall_hist = np.array([])
shortfall_deque = deque(maxlen=100)
for episode in range(episodes):
# Reset the enviroment
cur_state = env.reset(seed = episode, liquid_time = lqt, num_trades = n_trades, lamb = tr)
# set the environment to make transactions
env.start_transactions()
for i in range(n_trades + 1):
# Predict the best action for the current state.
action = agent.act(cur_state, add_noise = True)
# Action is performed and new state, reward, info are received.
new_state, reward, done, info = env.step(action)
# current state, action, reward, new state are stored in the experience replay
agent.step(cur_state, action, reward, new_state, done)
# roll over new state
cur_state = new_state
if info.done:
shortfall_hist = np.append(shortfall_hist, info.implementation_shortfall)
shortfall_deque.append(info.implementation_shortfall)
break
if (episode + 1) % 100 == 0: # print average shortfall over last 100 episodes
print('\rEpisode [{}/{}]\tAverage Shortfall: ${:,.2f}'.format(episode + 1, episodes, np.mean(shortfall_deque)))
print('\nAverage Implementation Shortfall: ${:,.2f} \n'.format(np.mean(shortfall_hist)))
```
# Todo
The above code should provide you with a starting framework for incorporating more complex dynamics into our model. Here are a few things you can try out:
- Incorporate your own reward function in the simulation environment to see if you can achieve an expected shortfall that is better (lower) than that produced by the Almgren and Chriss model.
- Experiment with rewarding the agent at every step versus only giving a reward at the end.
- Use more realistic price dynamics, such as geometric brownian motion (GBM). The equations used to model GBM can be found in section 3b of this [paper](https://ro.uow.edu.au/cgi/viewcontent.cgi?referer=https://www.google.com/&httpsredir=1&article=1705&context=aabfj)
- Try different functions for the action. You can change the values of the actions produced by the agent by using different functions. You can choose your function depending on the interpretation you give to the action. For example, you could set the action to be a function of the trading rate.
- Add more complex dynamics to the environment. Try incorporating trading fees, for example. This can be done by adding an extra term to the fixed cost of selling, $\epsilon$.
|
github_jupyter
|
import utils
# Get the default financial and AC Model parameters
financial_params, ac_params = utils.get_env_param()
financial_params
ac_params
import numpy as np
import syntheticChrissAlmgren as sca
from ddpg_agent import Agent
from collections import deque
# Create simulation environment
env = sca.MarketEnvironment()
# Initialize Feed-forward DNNs for Actor and Critic models.
agent = Agent(state_size=env.observation_space_dimension(), action_size=env.action_space_dimension(), random_seed=0)
# Set the liquidation time
lqt = 60
# Set the number of trades
n_trades = 60
# Set trader's risk aversion
tr = 1e-6
# Set the number of episodes to run the simulation
episodes = 3000
shortfall_hist = np.array([])
shortfall_deque = deque(maxlen=100)
for episode in range(episodes):
# Reset the enviroment
cur_state = env.reset(seed = episode, liquid_time = lqt, num_trades = n_trades, lamb = tr)
# set the environment to make transactions
env.start_transactions()
for i in range(n_trades + 1):
# Predict the best action for the current state.
action = agent.act(cur_state, add_noise = True)
# Action is performed and new state, reward, info are received.
new_state, reward, done, info = env.step(action)
# current state, action, reward, new state are stored in the experience replay
agent.step(cur_state, action, reward, new_state, done)
# roll over new state
cur_state = new_state
if info.done:
shortfall_hist = np.append(shortfall_hist, info.implementation_shortfall)
shortfall_deque.append(info.implementation_shortfall)
break
if (episode + 1) % 100 == 0: # print average shortfall over last 100 episodes
print('\rEpisode [{}/{}]\tAverage Shortfall: ${:,.2f}'.format(episode + 1, episodes, np.mean(shortfall_deque)))
print('\nAverage Implementation Shortfall: ${:,.2f} \n'.format(np.mean(shortfall_hist)))
| 0.472197 | 0.991221 |
## Lists, Dictionaries and Sets in Python
## Part 1
### Lists in Python
Sequences in Python are data structures that hold objects in an ordered array. Now, we will work on Lists, the most common sequence data types in Python.
```
#Example
l1 = ['learning', "Python", 'is fun?', True]
print(l1)
```
A list can also be created by using the list() function.
```
#Example
l2 = list(("learning", "for", "life", True))
print(l2)
```
Adding to an existing list
### Question 1
Add 10 to list l1 given above.
[**Hint:** Use **append**]
```
l1.append(10)
print(l1)
```
Removing from an existing list
### Question 2
Remove 10 from l1.
[ **Hint:** Use **remove**]
```
l1.remove(10)
print(l1)
```
Joining 2 lists
### Question 3
Join lists l1 and l2 (given above).
[**Hint:** Use the **+** operator or **extend**]
```
l1 = ['learning', "Python", 'is fun?', True]
l2 = list(("learning", "for", "life", True))
print(l1+l2)
```
Number List
### Question 4
Find Range and Mean of l3.
l3 = [2,4,6,8]
[**Hint:** Use the **len(), sum(), min(), max()** functions]
If you want to use standard functions like mean and range, you can import them from NumPy; otherwise you can calculate them the traditional way using formulas (a NumPy version is sketched after the solution below).
```
l3=[2,4,6,8]
print("Range is ",'[',max(l3),',',min(l3),']')
print("Mean is ",sum(l3)/len(l3))
```
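For comparison, a sketch of the same statistics computed with NumPy (np.ptp returns max - min, the statistical range):
```
import numpy as np

l3 = [2, 4, 6, 8]
print("Range (max - min):", np.ptp(l3))   # 6
print("Mean:", np.mean(l3))               # 5.0
```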
Count the occurrences of an element in a given list.
### Question 5
Append the given sequence of numbers to l3 (given above): 0,1,3,3,5,5,7,9. Count the occurrences of 5 in l3.
[**Hint:** Use the **+** operator to add multiple elements to the list and the **count()** function to print the occurrences]
```
#The statement splits the input by ',', casts each piece to an integer and builds the list
lis=list(map(int,input().split(',')))
print(lis)
print("The occurrences of 5 in lis are :",lis.count(5))
```
Sorting and Reversing a list
### Question 6
Sort and print l3 (given above) in ascending and then descending order.
**(Hint: Use .sort() function)**
```
#Sort the list in ascending order
l3.sort()
print(l3)
#Reverse the sorted list; we could also get descending order directly with l3.sort(reverse=True)
l3.reverse()
print(l3)
```
### Functions
**Example:**
**def** function_name(args)**:**
function code goes here
### Question 7
Define a function named **sum_3** which takes 3 numbers as input and returns their sum.
```
def sum_3(lst):
return sum(lst)
k=[int(i) for i in input("Enter 3 Numbers with spaces : ").split()] #using list comprehension
sum_3(k)
```
### Lambda Functions
Anonymous functions or no name functions, which can be considered when you use a function only once.
**Example:**
f = lambda x, y : x + y
f(1,1)
2
### Question 8
Write the same above **sum_3** function using lambda.
```
sum_3=lambda x,y,z:x+y+z
#the map makes use of eval to cast them to integers and call the sum_3 Anonymous function,
#which returns the sum of the three numbers
print(sum_3(*map(eval,input("Enter 3 numbers with spaces ").split())))
```
# Numpy
We have seen Python's basic data structures in the last section. They are great, but they lack specialized features for data analysis: things like adding rows and columns or operating on 2-D matrices aren't readily available. So, we will use *numpy* for such functions.
```
import numpy as np
```
Numpy operates on *nd* arrays. These are similar to lists, but they contain homogeneous elements and make it easier to store 2-D data.
```
l1 = [1,2,3,4]
nd1 = np.array(l1)
print(nd1)
l2 = [5,6,7,8]
nd2 = np.array([l1,l2])
print(nd2)
```
Some functions on np.array()
```
print(nd2.shape)
print(nd2.size)
print(nd2.dtype)
```
### Question 1
Create an identity 2d-array or matrix (with ones across the diagonal).
[**Hint:** You can also use the **np.identity()** function]
```
#Taking input from user
n=int(input("Enter the dimension of numpy identity array"))
print(np.identity(n))
```
### Question 2
Create a 2d-array or matrix of order 3x3 with values = 9,8,7,6,5,4,3,2,1 arranged in the same order.
Use: **np.matrix()** function
```
# Taking a numpy array using the arange function in reverse order
#numpy.arange(start,stop,step)
ndarr=np.arange(9,0,-1).reshape(3,3)
print(ndarr)
```
### Question 3
Reverse both the rows and columns of the given matrix.
(Hint: You can use the transpose **.T**)
```
# Use the transpose attribute .T to swap the rows and columns of the 2-D array
ndarr.T
```
### Question 4
Add + 1 to all the elements in the given matrix.
```
#We can simply add +1 to the array
ndarr+1
```
Similarly, you can do operations like scalar subtraction, division, and multiplication (operating on each element in the matrix).
### Question 5
Find the mean of all elements in the given matrix nd6.
nd6 = [[ 1 4 9 121 144 169]
[ 16 25 36 196 225 256]
[ 49 64 81 289 324 361]]
Use: **.mean()** function
```
#Observing that nd6 is a series of squares, we can construct it as follows
nd=(np.concatenate((np.arange(1,10).reshape(3,3),np.arange(11,20).reshape(3,3)),axis=1))**2
print(nd)
print(nd.mean())
```
### Question 7
Find the dot product of two given matrices.
[**Hint:** Use **np.dot()**]
```
# We assume two random integer matrices as input, created with np.random.randint(start, stop, (shape))
np1=np.random.randint(2,90,(3,3))
print(np1)
np2=np.random.randint(3,60,(3,3))
print(np2)
print(np.dot(np1,np2))
```
# Pandas
We have seen Numpy in the last section. It is good at performing math operations on 2-D arrays of numbers. But the major drawback is that it cannot deal with heterogeneous values. So, Pandas dataframes are helpful in that aspect for storing different data types and referring to values like a dict in Python instead of just referring to each item by index.
[Link to Official Documentation](http://pandas.pydata.org/pandas-docs/version/0.23/dsintro.html)
## Series
Pandas series are almost the same as nd arrays in numpy, with the additional ability to reference values with custom labels, like *keys* in a *dictionary* in Python.
```
import numpy as np
import pandas as pd
#Example
series1 = pd.Series(data = [1,2,3], index = ['key1', 'key2', 'key3'])
series1
```
### Question 1
Convert a given dict to pd series.
[**Hint:** Use **.Series**]
```
d1 = {'a': 1, 'b': 2, 'c': 3}
series2=pd.Series(d1,index=d1.keys())
print(series2)
```
You can directly use numpy functions on series.
### Question 2
Find the dot product of both the series created above.
[**Hint:** Use **np.dot()**]
```
#Using series1 and series2 above
np.dot(series1,series2)
```
## Dataframes
A dataframe is a table with labeled columns which can hold different types of data in each column.
```
# Example
d1 = {'a': [1,2,3], 'b': [3,4,5], 'c':[6,7,8] }
df1 = pd.DataFrame(d1)
df1
```
### Question 3
Select second row in the above dataframe df1.
```
#Indexing for the second row using DataFrame.iloc[:,:]
df1.iloc[1]
```
### Question 4
Select column c in second row of df1.
[**Hint:** For using labels use **df.loc[row, column]**. For using numeric indexes use **df.iloc[]**. For using a mixture of numeric indexes and labels use **df.ix[row, column]**]
```
#The question asks for column c in the second row, so we use DataFrame.loc[row, column]
df1.loc[1,'c']
```
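For comparison, a quick sketch contrasting label-based and position-based selection of the same cell (df.ix is deprecated in recent pandas, so loc and iloc cover both cases):
```
print(df1.loc[1, 'c'])    # label-based: row label 1, column 'c'
print(df1.iloc[1, 2])     # position-based: second row, third column
```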
## Using Dataframes on a dataset
##### Using the mtcars dataset.
For the below set of questions, we will be using the cars data from [Motor Trend Car Road Tests](http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/mtcars.html)
The data was extracted from the 1974 Motor Trend US magazine, and comprises fuel consumption and 10 aspects of automobile design and performance for 32 automobiles (1973–74 models).
Details :
A data frame with 32 observations on 11 (numeric) variables.
[, 1] mpg Miles/(US) gallon
[, 2] cyl Number of cylinders
[, 3] disp Displacement (cu.in.)
[, 4] hp Gross horsepower
[, 5] drat Rear axle ratio
[, 6] wt Weight (1000 lbs)
[, 7] qsec 1/4 mile time
[, 8] vs Engine (0 = V-shaped, 1 = straight)
[, 9] am Transmission (0 = automatic, 1 = manual)
[,10] gear Number of forward gears
[,11] carb Number of carburetors
```
## Reading a dataset from a csv file using pandas.
mtcars = pd.read_csv('C:\\Users\\sande\\Desktop\\h20\\AIML\\Residency-1\\Lab_internal\\mtcars.csv')
mtcars.index = mtcars['name']
```
The following questions are based on analysing a particular dataset using dataframes.
### Question 5
Check the type and dimensions of given dataset - mtcars.
[**Hint:** Use **type()** and **df.shape**]
```
#We can use DataFrame.info also
print("Type columns in mtcars is",mtcars.dtypes)
print("\nShape of mtcars :",mtcars.shape)
```
### Question 6
Check the first 10 lines and last 10 lines of the given dataset- mtcars.
[**Hint:** Use **.head()** and **.tail()**]
```
#We can use DataFrame.head() and DataFrame.tail()
print("First 10 lines in mtcars :")
print(mtcars.head(10))
print("Last 10 lines in mtcars :")
print(mtcars.tail(10))
```
### Question 7
Print all the column labels in the given dataset - mtcars.
[**Hint:** Use **df.columns**]
```
#We can use DataFrame.columns
print(mtcars.columns)
```
### Question 8
Select first 6 rows and 3 columns in mtcars dataframe.
**Hint:**
mtcars.iloc[:,:] gives all rows and columns in the dataset.
```
print(mtcars.iloc[:6,:3])
```
## Lab Brief - Statistical Learning with Python
### Part-1
## Measures of Data
Descriptive statistics are measures that summarize important features of data. Producing these descriptive statistics is a common first step to take after cleaning and preparing a dataset for further analysis. Examples include calculating the mean or median. In this lab, we will be looking into some of these functions and exploring several new ones.
#### Measures of Central Tendency
Whenever you measure things of the same kind, a fairly large number of such measurements will tend to cluster around the middle value. Such a value is called a measure of "Central Tendency". The other terms that are used synonymously are "Measures of Location", or "Statistical Averages".
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
mtcars = pd.read_csv('C:\\Users\\sande\\Desktop\\h20\\AIML\\Residency-1\\Lab_internal\\mtcars.csv')
```
##### Using the mtcars dataset.
For the below set of questions, we will be using the cars data from [Motor Trend Car Road Tests](http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/mtcars.html)
The data was extracted from the 1974 Motor Trend US magazine, and comprises fuel consumption and 10 aspects of automobile design and performance for 32 automobiles (1973–74 models).
Details :
A data frame with 32 observations on 11 (numeric) variables.
[, 1] mpg Miles/(US) gallon
[, 2] cyl Number of cylinders
[, 3] disp Displacement (cu.in.)
[, 4] hp Gross horsepower
[, 5] drat Rear axle ratio
[, 6] wt Weight (1000 lbs)
[, 7] qsec 1/4 mile time
[, 8] vs Engine (0 = V-shaped, 1 = straight)
[, 9] am Transmission (0 = automatic, 1 = manual)
[,10] gear Number of forward gears
[,11] carb Number of carburetors
#### Explore the data - Setting the index
```
mtcars.index = mtcars["name"]
mtcars.head()
# mtcars.mean() #By default this will give the mean of each column
```
### Question 1
What is the first car name given in mtcars?
**Make use of the index which has been set previously**
**Hint**: Use .loc function
```
#Using the index set previously, we can print the first car name
print(mtcars.index[0])
```
### Question 2
What is the highest difference between the hp and cyl?
**Hint:** You can use **max()** function to find the difference
```
print(max(mtcars['hp']-mtcars['cyl']))
```
### Question 3
What are the cars having highest weight(wt) and lowest weight(wt)?
**Hint:** Use idxmax() and idxmin() to locate the row
Use **.iloc** function to print the row
**Note:** Both idxmax and idxmin belong to pandas
```
#We use Series.idxmax() and Series.idxmin() to find the index labels of the heaviest and lightest cars
#First we print the car names using idxmax and idxmin
print("\nMaximum weight car :",mtcars['wt'].idxmax(),'\n')
print("\nMinimum weight car :",mtcars['wt'].idxmin(),'\n')
#Printing the row using iloc
print("Maximum weight car row\n",mtcars[:][mtcars['wt']==max(mtcars['wt'])],'\n')
print("Minimum weight car row\n",mtcars[:][mtcars['wt']==min(mtcars['wt'])],'\n')
#We can also print the row in this way:
#print("Maximum weight car data",mtcars.loc[mtcars['wt'].idxmax()],'\n')
#print("Minimum weight car data\n",mtcars.loc[mtcars['wt'].idxmin()])
```
### Question 4
Find the mean of each row and each column in the given dataset.
**Hint:** Use the .mean() function. With **axis=0** you get the mean of each column; with **axis=1** you get the mean of each row
```
print("Mean of each column :\n",mtcars.mean(axis=0))
print("Mean of each row :\n",mtcars.mean(axis=1))
```
### Question 5
Plot the mpg values of the mtcars dataframe using matplotlib.
**Hint: Use plt.plot function**
```
plt.plot(mtcars['mpg'])
plt.show()
```
## Part 2
## Measures of Dispersion
Measures of Dispersion (Spread) are statistics that describe how data varies. Measure of dispersion gives us the sense of how much the data tends to diverge from the central tendency.
### Question 6
What is the range of `mpg` in the given dataset?
**Hint:** Range = max-min
```
print("Range of mpg in mtcars is ",max(mtcars['mpg'])-min(mtcars['mpg']))
```
### Question 7
Calculate the 25th quantile (lower quantile)for `mpg` in the given dataset.
**Hint:** Use **.quantile()**
```
mtcars['mpg'].quantile(q=0.25)
```
### Question 8
Calculate the Inter-Quartile Range(IQR) for `mpg` in the given dataset.
Interquartile range (IQR) is another common measure of spread. IQR is the distance between the 3rd quartile and the 1st quartile.
**Hint:** IQR = Q3 - Q1
```
print("The IQR of mpg in mtcars",mtcars['mpg'].quantile(q=0.75)-mtcars['mpg'].quantile(q=0.25))
```
### Question 9
Calculate the Variance, Standard Deviation and Median Absolute Deviation for `mpg`.
Variance and Standard Deviation are two other common measure of dispersion.
**Hint:** Use .var() and .std()
**Hint:** Median Absolute Deviation - alternative measure of spread based on the median. It is the median of the absolute value of the deviations from the median.
```
print("Variance of mpg in mtcars : ",mtcars['mpg'].var())
print("Standard Deviation of mpg in mtcars :",mtcars['mpg'].std())
mtcars['Medianabs'] = abs(mtcars['mpg'] - mtcars['mpg'].median())
print("Median Absolute Deviation is :",mtcars['Medianabs'].median())
```
+ This notebook is part of lecture 25 *Symmetric matrices and positive definiteness* in the OCW MIT course 18.06 by Prof Gilbert Strang [1]
+ Created by me, Dr Juan H Klopper
+ Head of Acute Care Surgery
+ Groote Schuur Hospital
+ University Cape Town
+ <a href="mailto:[email protected]">Email me with your thoughts, comments, suggestions and corrections</a>
<a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/"><img alt="Creative Commons Licence" style="border-width:0" src="https://i.creativecommons.org/l/by-nc/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" href="http://purl.org/dc/dcmitype/InteractiveResource" property="dct:title" rel="dct:type">Linear Algebra OCW MIT18.06</span> <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">IPython notebook [2] study notes by Dr Juan H Klopper</span> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>.
+ [1] <a href="http://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/index.htm">OCW MIT 18.06</a>
+ [2] Fernando Pérez, Brian E. Granger, IPython: A System for Interactive Scientific Computing, Computing in Science and Engineering, vol. 9, no. 3, pp. 21-29, May/June 2007, doi:10.1109/MCSE.2007.53. URL: http://ipython.org
```
from IPython.core.display import HTML, Image
css_file = 'style.css'
HTML(open(css_file, 'r').read())
from sympy import init_printing, Matrix, symbols, sqrt
from warnings import filterwarnings
init_printing(use_latex = 'mathjax')
filterwarnings('ignore')
```
# Symmetric matrices
# Positive definite matrices
## Symmetric matrices
* Symmetric matrices are square with the following property
$$ {A}={A}^{T} $$
* We are concerned with the eigenvalues and eigenvectors of symmetric matrices
* The eigenvalues are real
* The eigenvectors are orthogonal, or at least, can be chosen orthogonal
* Considering proof of the real nature of eigenvalues we have the following
* Any matrix equation of the following example can be changed to its complex conjugate form by changing each element into its complex conjugate form (here marked with a bar over the top)
$$ {A}\underline{x}={\lambda}\underline{x} \\ {A}\overline{x}=\overline{\lambda}\overline{x} $$
* We can multiply both sides on the left by the complex conjugate transpose of **x**
$$ \overline{x}^{T}A\underline{x}={\lambda}\overline{x}^{T}\underline{x}\dots\left(1\right) $$
* Taking the transpose of the complex conjugate equation and multiplying on the right by **x** gives the following
$$ \overline{x}^{T}{A}^{T}\underline{x}=\overline{x}^{T}\overline{\lambda}\underline{x} $$
* Now if A is symmetric we use the fact that A=A<sup>T</sup>
$$ \overline{x}^{T}{A}\underline{x}=\overline{x}^{T}\overline{\lambda}\underline{x}\dots\left(2\right) $$
* Note how the left-hand sides of (1) and (2) are equal, and we therefore have the following
$$ \lambda\overline{x}^{T}\underline{x}=\overline\lambda\overline{x}^{T}\underline{x} $$
* This means the following
$$ \lambda=\overline\lambda $$
* The only way that this is possible is if the imaginary part is zero, so only real eigenvalues are possible
* Note also what happens if the complex conjugate of the vector **x** is multiplied by the vector itself
* Remember that **x**<sup>T</sup>**x** is a form of the dot product (which is the length squared)
* Any number times its complex conjugate gets rid of the imaginary part: $\overline{\left(a+bi\right)}\left(a+bi\right)=\left(a-bi\right)\left(a+bi\right)={a}^{2}+{b}^{2}$, which is a non-negative real number
* Consider the following symmetric matrix A
```
A = Matrix([[5, 2], [2, 3]])
A
```
* Let's see if it really is symmetric by making sure that it is equal to its transpose
```
A == A.transpose() # Boolean (true or false) statement
S, D = A.diagonalize()
```
* S, the matrix containing the eigenvectors as its columns
* Remember that these eigenvectors are not necessarily the same as those you would get doing these problems by hand
* When substituting the values for λ<sub>i</sub> a singular matrix is created with rows that are simply linear combinations of each other
* You are free to choose values for the components of the eigenvectors for each eigenvalue (usually choosing the simplest ones)
```
S
```
* D, the matrix containing the values of the eigenvalues down the main diagonal
```
D
```
* In decomposition, a symmetric matrix results in the following
$$ {A}={S}{\Lambda}{S}^{T} $$
* In this case we have an orthogonal matrix times diagonal matrix times transpose of orthogonal matrix
$$ {A}={Q}{\Lambda}{Q}^{T} $$
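To check this for the matrix A above, we can normalize the columns of S so that it becomes the orthogonal matrix Q, and confirm that the product recovers A (a quick sketch, not part of the original notes):
```
from sympy import simplify
S, D = A.diagonalize()
# Normalize each eigenvector column so that S becomes an orthogonal matrix Q
Q = Matrix.hstack(S[:, 0] / S[:, 0].norm(), S[:, 1] / S[:, 1].norm())
(Q * D * Q.T).applyfunc(simplify)  # returns the original matrix A
```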
```
A.eigenvals()
A.eigenvects()
```
* We've seen in our example that, indeed, the eigenvalues are real
* Let's see if the eigenvectors are orthogonal by looking at their dot product
```
eigenvec_1 = Matrix([-2 / (1 + sqrt(5)), 1])
eigenvec_2 = Matrix([-2 / (1 - sqrt(5)), 1])
eigenvec_1.dot(eigenvec_2)
```
* This is certainly zero when simplified
```
(eigenvec_1.dot(eigenvec_2)).simplify() # Using the simplify() method
```
* We need not use symbolic computing (computer algebra system, CAS)
* Let's look at numerical evaluation using numerical python (numpy)
```
import numpy as np # Using namespace abbreviations
A = np.matrix([[5, 2], [2, 3]])
A
w, v = np.linalg.eig(A) # Calculating the eigenvalues and eigenvectors
# The result of np.linalg.eig() is a tuple, the first being the eigenvalues
# The second being the eigenvectors
w
v
# Creating the diagonal matrix manually from the eigenvalues
D = np.matrix([[6.23606798, 0], [0, 1.76393202]])
D
# Checking to see if our equation for A holds
v * D * np.matrix.transpose(v)
```
## Positive definite matrices (referring to symmetric matrices)
* The properties of positive definite (symmetric) matrices
* All eigenvalues are positive
* All pivots are positive
* All determinants (actually also all *sub*-determinants) are positive
* For a positive definite (square symmetric) matrix A, invertibility follows from the determinant
* The determinant is the product of the eigenvalues
* Since all the eigenvalues are positive, the determinant must therefore be larger than zero
* A non-zero determinant means A is invertible
* For projection matrices
* The eigenvalues are either 0 or 1
* If this projection matrix is positive definite
* The eigenvalues must all be 1 (since they must be larger than zero)
* The only matrix that satisfies this property is the identity matrix
* The diagonal matrix D is positive definite
* This means that for any non-zero vector **x** we have **x**<sup>T</sup>D**x**>0
* Let's look at a 3-component vector with a 3×3 matrix D
```
d1, d2, d3, x1, x2, x3 = symbols('d1 d2 d3 x1 x2 x3')
D = Matrix([[d1, 0, 0], [0, d2, 0], [0, 0, d3]])
x_vect = Matrix([x1, x2, x3])
x_vect.transpose(), D, x_vect
```
* Indeed we have **x**<sup>T</sup>D**x**>0 since the components of **x** are squared and the eigenvalues are all positive
```
x_vect.transpose() * D * x_vect
```
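Before looking at matrices that are not positive definite, here is a quick sketch (not part of the original notes) checking the criteria above for the symmetric matrix used earlier in this notebook:
```
# The earlier symmetric matrix [[5, 2], [2, 3]] satisfies all the criteria
A_pd = Matrix([[5, 2], [2, 3]])
# eigenvalues 4 +/- sqrt(5) are positive, determinant 11 > 0, leading 1x1 sub-determinant 5 > 0
A_pd.eigenvals(), A_pd.det(), A_pd[0, 0]
```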
* Not all symmetric matrices with a positive determinant are positive definite
* Easy matrices to construct with this property have negative values on the main diagonal
* Note below how the eigenvalues are not all more than zero
* Also note how **x**<sup>T</sup>D**x**≯0
* It is important to note that the *sub*-determinants must also be positive
* In the example below the 1×1 *sub*-determinants (the diagonal entries) are 3 and -1, so the matrix cannot be positive definite
```
A = Matrix([[3, 1], [1, -1]])
A
A == A.transpose()
A.det()
A.eigenvals()
A.eigenvects()
S, D = A.diagonalize()
S
D
x_vect = Matrix([x1, x2])
x_vect
x_vect.transpose() * D * x_vect
```
* In this example the 1×1 *sub*-determinants (the diagonal entries) are -3 and 1
```
A = Matrix([[-3, 1], [1, 1]])
A
A == A.transpose()
S, D = A.diagonalize()
x_vect.transpose() * D * x_vect
```
# MAK DRAFT HELPER 2019
```
import pandas as pd
import numpy as np
```
## Batters
```
batters = pd.read_csv('./FantasyPros_2019_Projections_H.csv')
```
Fantasy Pros (fantasypros.com) produces aggregated expert projections for each player across several key stats. No need to reinvent the wheel like Nate Silver did in his book with inefficient models - we just take what the experts project (using their models) and look at their consensus.
```
batters.head()
del batters['OBP']
del batters['2B']
del batters['3B']
del batters['SLG']
del batters['OPS']
batters = batters[batters['AB'] >= 200]
batters['BB/KO'] = batters['BB'] / batters['SO']
```
For BB/KO, we want to look at the number of walks ABOVE the average BB/KO of the selection set (which is all batters projected to have over 200 AB's). This will appropriately weight the average based on projected Fantasy production: multiplying the rate difference by projected strikeouts turns it back into a count, since (BB/KO - mean) * KO = BB - mean * KO, i.e. walks above the number expected at the average rate. We do the same with batting average.
```
bbko_mean = batters['BB/KO'].mean()
bbko_mean
batters['BB+'] = (batters['BB/KO'] - bbko_mean) * batters['SO']
batters['AVG'] = batters['H'] / batters['AB']
avg_mean = batters['AVG'].mean()
avg_mean
batters['AVG+'] = (batters['AVG'] - avg_mean) * batters['AB']
```
For this year, I'm simply standardizing each metric to the 0-100 linear scale with 100 being the top performer for that metric and 0 being the bottom. Since the scales are different for each one, this makes sense. In the past, I've used Z scores, but it is easier to be able to mentally calculate the actual metric in some cases. It also makes sense since these stats all accumulate over the course of the season/week.
```
batters['AVGS'] = round( ( batters['AVG+'] - batters['AVG+'].min() ) / ( batters['AVG+'].max() - batters['AVG+'].min() ) * 100 )
batters['BBKOS'] = round( ( batters['BB+'] - batters['BB+'].min() ) / ( batters['BB+'].max() - batters['BB+'].min() ) * 100 )
batters['HRS'] = round( ( batters['HR'] - batters['HR'].min() ) / ( batters['HR'].max() - batters['HR'].min() ) * 100 )
batters['RS'] = round( ( batters['R'] - batters['R'].min() ) / ( batters['R'].max() - batters['R'].min() ) * 100 )
batters['RBIS'] = round( ( batters['RBI'] - batters['RBI'].min() ) / ( batters['RBI'].max() - batters['RBI'].min() ) * 100 )
batters['SBS'] = round( ( batters['SB'] - batters['SB'].min() ) / ( batters['SB'].max() - batters['SB'].min() ) * 100 )
batters['SCORE'] = batters['AVGS'] + batters['BBKOS'] + batters['HRS'] + batters['RS'] + batters['RBIS'] + batters['SBS']
batters['RANK'] = batters['SCORE'].rank(method='average', na_option='keep', ascending=False, pct=False)
batters.sort_values(['SCORE'], ascending=[0])[0:5]
```
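The six repeated min-max expressions above could be factored into a small helper; a sketch (not part of the original notebook):
```
# Hypothetical helper for the repeated 0-100 min-max scaling used above
def scale_0_100(s):
    return round((s - s.min()) / (s.max() - s.min()) * 100)

# e.g. batters['HRS'] = scale_0_100(batters['HR'])
```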
## Pitchers
```
pitchers = pd.read_csv('./FantasyPros_2019_Projections_P.csv')
pitchers.head()
```
We score on Saves+Holds, so I need to bring the holds stat in from elsewhere.
```
holds = pd.read_csv('./cbs_P_Scoring.csv', header=1)
holds.head()
holds = holds[ holds['HD'] > 0 ]
holds['Name'] = holds['Player']
holds['Player'] = holds['Name'].str.extract(r'^(.+ .+) \w+ \|', expand=False)
holds['Team'] = holds['Name'].str.extract(r'\|\s(\w\w\w|\w\w)(?:\s|$)', expand=False)
holds = holds[ ['Player', 'Team', 'HD'] ]
holds = holds.replace('WAS', 'WSH')
holds = holds.replace('CHW', 'CWS')
holds.head()
pitchers = pitchers.merge(holds, on=['Player', 'Team'], how='left')
pitchers[pitchers['Player'].isnull()].head()
del pitchers['HR']
del pitchers['G']
del pitchers['GS']
del pitchers['CG']
pitchers = pitchers[pitchers['IP'] >= 40]
pitchers['ERA'] = pitchers['ER'] / pitchers['IP'] * 9
era_mean = pitchers['ERA'].mean()
pitchers['ERA+'] = ( era_mean - pitchers['ERA'] ) * pitchers['IP']
pitchers['WHIP'] = (pitchers['BB'] + pitchers['H']) / pitchers['IP']
whip_mean = pitchers['WHIP'].mean()
pitchers['WHIP+'] = ( whip_mean - pitchers['WHIP'] ) * pitchers['IP']
pitchers['W-L'] = pitchers['W'] - pitchers['L']
pitchers['HD'].fillna(0, inplace=True)
pitchers['SV+HD'] = pitchers['SV'] + pitchers['HD']
pitchers['ERAS'] = round( ( pitchers['ERA+'] - pitchers['ERA+'].min() ) / ( pitchers['ERA+'].max() - pitchers['ERA+'].min() ) * 100 )
pitchers['WHIPS'] = round( ( pitchers['WHIP+'] - pitchers['WHIP+'].min() ) / ( pitchers['WHIP+'].max() - pitchers['WHIP+'].min() ) * 100 )
pitchers['KS'] = round( ( pitchers['K'] - pitchers['K'].min() ) / ( pitchers['K'].max() - pitchers['K'].min() ) * 100 )
pitchers['SV+HDS'] = round( ( pitchers['SV+HD'] - pitchers['SV+HD'].min() ) / ( pitchers['SV+HD'].max() - pitchers['SV+HD'].min() ) * 100 )
pitchers['WLS'] = round( ( pitchers['W-L'] - pitchers['W-L'].min() ) / ( pitchers['W-L'].max() - pitchers['W-L'].min() ) * 100 )
pitchers['SCORE'] = pitchers['ERAS'] + pitchers['WHIPS'] + pitchers['KS'] + pitchers['SV+HDS'] + pitchers['WLS']
pitchers['RANK'] = pitchers['SCORE'].rank(method='average', na_option='keep', ascending=False, pct=False)
pitchers.sort_values(['SCORE'], ascending=[0])[0:5]
```
## Combining Pitchers and Batters
Since we score across five pitching and six hitting categories, with the weekly winners determined by the highest aggregate scores in each category, it makes sense to compare the score weight of batters vs. pitchers.
For the last couple of seasons, I've tried to "break the game" by focusing only on all the pitching categories plus two batting categories (AVG and BBKO) where there are batters generally available with high performance at a low cost. This has had mixed results, as the league adjusted accordingly.
```
batters_lim = batters[ ['Player', 'Team', 'Positions', 'AVGS', 'BBKOS', 'HRS', 'RS', 'RBIS', 'SBS', 'SCORE'] ]
pitchers_lim = pitchers[ ['Player', 'Team', 'Positions', 'ERAS', 'WHIPS', 'KS', 'SV+HDS', 'WLS', 'SCORE'] ]
all_players = pd.concat([batters_lim, pitchers_lim],sort=True)
all_players['RANK'] = all_players['SCORE'].rank(method='average', na_option='keep', ascending=False, pct=False)
all_players = all_players.sort_values(['SCORE'], ascending=[0])
all_players.head(25)
```
## Join on list of currently drafted players
```
draft = pd.read_csv('./results.csv', header=1)
draft.head()
draft = draft[ ['Player', 'Team'] ]
draft['Name'] = draft['Player']
draft['Owner'] = draft['Team']
draft = draft[ draft['Player'] != 'Player' ]
draft['Player'] = draft['Name'].str.extract(r'^(.+ .+) \w+ \|', expand=False)
draft['Team'] = draft['Name'].str.extract(r'\|\s(\w\w\w|\w\w)(?:\s|$)', expand=False)
draft = draft.dropna()
draft = draft.replace('WAS', 'WSH')
draft = draft.replace('CHW', 'CWS')
draft.head()
draft.loc[draft.Player == 'Craig Kimbrel', 'Team'] = np.nan
all_players_owners = all_players.merge(draft, on=['Player', 'Team'], how='outer')
all_players_owners.head()
```
Ensure all players merged. Cross fingers that there aren't any players on the same team with the same name. That would be weird, though. This takes advantage of the characteristics of an outer join.
```
all_players_owners[all_players_owners['Player'].isnull()].head()
all_players_owners[all_players_owners['Positions'].isnull()].head()
```
## All Currently Undrafted
```
all_players_owners = all_players_owners[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS', 'Owner'] ]
#Manually add minors drafted
all_players_owners.loc[all_players_owners.Player == 'Vladimir Guerrero Jr.', 'Owner'] = 'Low hanging fruit'
free_agents = all_players_owners[all_players_owners['Owner'].isnull()]
del free_agents['Owner']
free_agents.head(50)
```
## Positions
### 1B
```
first_base = all_players_owners[all_players_owners['Positions'].str.contains('1B')]
first_base = first_base[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'Owner'] ]
first_base.head(25)
```
### 2B
```
second_base = all_players_owners[all_players_owners['Positions'].str.contains('2B')]
second_base = second_base[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'Owner'] ]
second_base.head(25)
```
### SS
```
shortstop = all_players_owners[all_players_owners['Positions'].str.contains('SS')]
shortstop = shortstop[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'Owner'] ]
shortstop.head(25)
```
### 3B
```
third_base = all_players_owners[all_players_owners['Positions'].str.contains('3B')]
third_base = third_base[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'Owner'] ]
third_base.head(25)
```
### C
```
catcher = all_players_owners[all_players_owners['Positions'].str.contains('^C$|C,', regex=True)]
catcher = catcher[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'Owner'] ]
catcher.head(25)
```
### OF
```
outfield = all_players_owners[all_players_owners['Positions'].str.contains('OF|LF|RF|CF', regex=True)]
outfield = outfield[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'Owner'] ]
outfield[:25]
outfield[25:50]
#outfield[50:75]
```
### SP
```
starter = all_players_owners[all_players_owners['Positions'].str.contains('SP')]
starter = starter[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS', 'Owner'] ]
starter[:25]
starter[25:50]
```
### RP
```
relief = all_players_owners[all_players_owners['Positions'].str.contains('RP')]
relief = relief[ ['Player', 'Positions', 'Team', 'SCORE', 'RANK', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS', 'Owner'] ]
relief[:25]
```
## My Team
```
my_team = all_players_owners[ all_players_owners['Owner'] == 'Tommy\'s Shinebox' ]
my_team
round(my_team[ ['AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS'] ].mean())
```
## Roto Analysis
```
cols = ['Team', 'Count', 'AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS' ]
roto_score = pd.DataFrame(columns = cols)
for team in pd.unique(all_players_owners['Owner'].dropna()):
roster = all_players_owners[ all_players_owners['Owner'] == team ]
row = []
row.append(team)
row.append(len(roster))
for stat in ['AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS']:
if roster[stat].isnull().all():
row.append(0)
else:
row.append(round(roster[stat].mean()))
row = tuple(row)
rosterdf = pd.DataFrame([row], columns=cols)
roto_score = roto_score.append(rosterdf, ignore_index=True)
roto_score
cols = ['AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'ERAS', 'WHIPS', 'KS', 'WLS', 'SV+HDS' ]
for col in cols:
roto_score[col] = roto_score[col].rank(method='average', na_option='keep', ascending=True, pct=False)
roto_score['Score'] = roto_score[cols].sum(axis=1)
roto_score.sort_values(['Score'], ascending=False)
roto_score_adj = roto_score.copy()  # copy so that dropping a column below does not also modify roto_score
del roto_score_adj['SV+HDS']
cols = ['AVGS', 'BBKOS', 'HRS', 'RBIS', 'RS', 'SBS', 'ERAS', 'WHIPS', 'KS', 'WLS' ]
roto_score_adj['Score'] = roto_score[cols].sum(axis=1)
roto_score_adj.sort_values(['Score'], ascending=False)
```
# Logistic regression from scratch
```
import pandas as pd
import numpy as np
def get_accuracy(x: pd.DataFrame, y: pd.Series, y_hat: pd.Series):
correct = y_hat == y
acc = np.sum(correct) / len(y)
cond = y == 1
y1 = len(y[cond])
y0 = len(y[~cond])
print(f'Class 0: tested {y0}, correctly classified {correct[~cond].sum()}')
print(f'Class 1: tested {y1}, correctly classified {correct[cond].sum()}')
print(f'Overall: tested {len(y)}, correctly classified {correct.sum()}')
print(f'Accuracy = {acc:.2f}')
class Classifier:
def __init__(self, dataset: str = None, mle: bool=True):
        if dataset:
            # 'reader' is assumed to be defined elsewhere; it should load the named
            # dataset and return a feature DataFrame x and a label Series y
            x_train, y_train = reader(f'datasets/{dataset}-train.txt')
            x_test, y_test = reader(f'datasets/{dataset}-test.txt')
self.train(x_train, y_train, mle)
print('Training accuracy')
print('=' * 10)
self.accuracy(x_train, y_train)
print('Test accuracy')
print('=' * 10)
self.accuracy(x_test, y_test)
def accuracy(self, x: pd.DataFrame, y: pd.DataFrame) -> None:
y_hat = self.predict(x)
get_accuracy(x, y, y_hat)
class LR(Classifier):
def __init__(self,
eta: float = 1e-4,
epochs: int = int(1e4),
dataset: str = None):
self.ws = None
self.eta = eta
self.epochs = epochs
super().__init__(dataset, mle=True)
@staticmethod
def _sigmoid(x: np.array):
return 1 / (1 + np.exp(-x))
@staticmethod
def _prepend_intercept(x: pd.DataFrame) -> pd.DataFrame:
return pd.concat([pd.DataFrame([1] * len(x),
columns=['intercept']), x], axis=1)
def train(self, x: pd.DataFrame, y: pd.Series, mle=None) -> None:
xx = self._prepend_intercept(x)
self.ws = np.zeros(xx.shape[1])
for _ in range(self.epochs):
gs = (y - self._sigmoid(xx @ self.ws)) @ xx
self.ws += self.eta * gs.values
    def predict(self, x: pd.DataFrame) -> pd.Series:
        xx = self._prepend_intercept(x)
        # classify by thresholding the predicted probability at 0.5
        return self._sigmoid(xx @ self.ws) > 0.5
def _ll(self, x: pd.DataFrame,
y: pd.DataFrame, ws: np.array) -> float:
s = self._sigmoid(x @ ws)
return y @ np.log(s) + (1 - y) @ np.log(1 - s)
def ll(self, x: pd.DataFrame, y: pd.Series) -> float:
xx = self._prepend_intercept(x)
return self._ll(xx, y, self.ws)
def ll_all_zero(self, x: pd.DataFrame, y: pd.Series) -> float:
xx = self._prepend_intercept(x)
ws_zero = np.zeros(xx.shape[1])
return self._ll(xx, y, ws_zero)
x = pd.DataFrame({'x1': [0, 0, 1, 1], 'x2': [0, 1, 0, 1]})
y = pd.Series([0, 0, 1, 1])
x
y
%%time
lr = LR(epochs=10, eta=1e-1)
lr.train(x, y)
lr.accuracy(x, y)
lr.ws
```
# Chapter Two - Getting Started
## Insertion Sort
The insertion sort is a strategy for sorting an array by iterating over each item in the array and shifting it in one direction until it satisfies a greater-than or less-than condition. To put it another way, we will have two loops: one that iterates over all the values in an array that we want to sort, and a loop inside that loop that compares one value to all the values to the left, then decides where to put it.
In pseudocode, this looks something like:
```
initialize an outer index for iterating over values in the array
while outer index is less than the length of the whole array
initialize value at outer index
initialize a searching index that starts to the left of outer index
while the value at the searching index is less than the value at the outer index
shift the value at the searching index one place to the right
shift the outer index one place to the left
insert the value at the outer index one place to the right of the search index
shift the outer index one place to the right
```
In Python, it can be written:
```
def insertion_sort(array):
item_ix = 1
while item_ix < len(array):
item_value = array[item_ix]
search_ix = item_ix - 1
while (search_ix >= 0) and (item_value < array[search_ix]):
array[search_ix + 1] = array[search_ix]
search_ix -= 1
array[search_ix + 1] = item_value
item_ix += 1
return array
```
We can test this to make sure it's working with a random sequence of integers
```
from numpy import random
insertion_sort(random.randint(0,100,10))
```
## Merge sort
The insertion sort works pretty well with small arrays, but the amount of time it takes to sort an array grows quadratically with its length. To sort more quickly, we can use a recursive algorithm that sorts arrays by splitting them up into smaller and smaller pieces, ordering them, and recombining them. The crucial step here is the recombining step, as this is where the merge sort gets its computational efficiency.
In pseudocode, we might write this like:
```
divide array into a left array and right array
while each array is longer than longer than one
call this function recursively to sort them
initialize separate indices for the array, and its left and right pieces
for each position in the array
put the smaller of the left or right arrays item at their respective indices
increment the index for the array that gave the value
```
To make this work in Python, we need to do two things. First, we need to import a function that copies mutable objects. By default, when Python initializes a mutable object, it creates a place in memory and all assignment statements that reference that object are just pointing to the location in memory. This means that if you assign two names to the same mutable object, changing one will change the other as well:
```
a = [1, 2, 3]
b = a
del a[0]
b
```
To make a different copy of an object in memory, so that we can operate on one while not changing the other, we will import a function called `deepcopy`.
```
from copy import deepcopy
a = [1, 2, 3]
b = deepcopy(a)
del a[0]
b
```
The other thing we'll need to do is define a function that returns something other than an exception when we try to access an item in an array past the last index of the array. The reason for this is that after one sub-array has run out of items to put back into the main array, it still has to give something to serve in the comparison between the left and right sub-arrays.
Here, we are going to have it return `infinity` to facilitate a `less than` comparison.
```
def get_value(array, ix):
try:
value = array[ix]
except IndexError:
value = float('inf')
return value
```
Now, we can transcribe our pseudocode into Python:
```
def merge_sort(array):
left_array = deepcopy(array[:(len(array)//2)])
right_array = deepcopy(array[(len(array)//2):])
if len(left_array) > 1:
left_array = merge_sort(left_array)
if len(right_array) > 1:
right_array = merge_sort(right_array)
left_ix = 0
right_ix = 0
array_ix = 0
while array_ix < len(array):
left_value = get_value(left_array, left_ix)
right_value = get_value(right_array, right_ix)
if left_value < right_value:
array[array_ix] = left_value
left_ix += 1
else:
array[array_ix] = right_value
right_ix += 1
array_ix += 1
return array
```
And as a sanity check, we have it sort a short list for us:
```
merge_sort(random.randint(0,100,10))
```
At small sizes, insertion sort is faster than merge sort:
```
%timeit insertion_sort(random.randint(0,100,100))
%timeit merge_sort(random.randint(0,100,100))
```
But at large sizes, merge_sort greatly outperforms insertion_sort:
```
%timeit insertion_sort(random.randint(0,100,10000))
%timeit merge_sort(random.randint(0,100,10000))
```
## Combining sort strategies
If we want to make an even faster algorithm, we can combine these two strategies, choosing the sort type based on the size of the array. But what size should we pick? We know that insertion sort scales quadratically while merge sort scales as O(n log n), so we can guess that their running times will cross over around an n of 200.
```
%timeit insertion_sort(random.randint(0,100,200))
%timeit merge_sort(random.randint(0,100,200))
```
That's pretty close, so let's try it in our combined algorithm:
```
def combined_sort(array):
left_array = deepcopy(array[:(len(array)//2)])
right_array = deepcopy(array[(len(array)//2):])
if len(left_array) > 200:
left_array = merge_sort(left_array)
else:
left_array = insertion_sort(left_array)
if len(right_array) > 200:
right_array = merge_sort(right_array)
else:
right_array = insertion_sort(right_array)
left_ix = 0
right_ix = 0
array_ix = 0
while array_ix < len(array):
left_value = get_value(left_array, left_ix)
right_value = get_value(right_array, right_ix)
if left_value < right_value:
array[array_ix] = left_value
left_ix += 1
else:
array[array_ix] = right_value
right_ix += 1
array_ix += 1
return array
```
Again, as a sanity check, we can call this on a small array:
```
combined_sort(random.randint(0,100,10))
```
And we can benchmark it against the other two sort methods:
```
%timeit combined_sort(random.randint(0,100,10000))
```
Interestingly, the way we have it coded here, this doesn't speed up the processing by much. One reason is that `combined_sort` only applies the size check at the top level: the two halves are handed to the plain `merge_sort`, which never switches to insertion sort as it recurses, so for a 10,000-element array the insertion-sort branch is never actually taken. On top of that, in Python the overhead of the extra function calls and of passing data to them can eat into whatever gains insertion sort offers at small array sizes.
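A minimal sketch of a variant that keeps the size check in play at every level of the recursion (reusing `insertion_sort`, `get_value`, and `deepcopy` from above; the threshold of 200 is just the guess made earlier) might look like this:
```
def hybrid_sort(array, threshold=200):
    # Small arrays are handed straight to insertion sort, at every depth.
    if len(array) <= threshold:
        return insertion_sort(array)
    # Otherwise split, recurse into this same function, and merge as before.
    left_array = hybrid_sort(deepcopy(array[:(len(array)//2)]), threshold)
    right_array = hybrid_sort(deepcopy(array[(len(array)//2):]), threshold)
    left_ix, right_ix, array_ix = 0, 0, 0
    while array_ix < len(array):
        left_value = get_value(left_array, left_ix)
        right_value = get_value(right_array, right_ix)
        if left_value < right_value:
            array[array_ix] = left_value
            left_ix += 1
        else:
            array[array_ix] = right_value
            right_ix += 1
        array_ix += 1
    return array

%timeit hybrid_sort(random.randint(0,100,10000))
```
Whether this actually beats plain `merge_sort` depends on the threshold and on Python's call overhead, so it is worth timing rather than assuming.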
We can compare this to Python's base implementation of sort, called [timsort](http://svn.python.org/view/python/trunk/Objects/listsort.txt?revision=69846&view=markup):
```
%timeit sorted(random.randint(0,100,10000))
```
Timsort here is performing nearly two orders of magnitude better than our Python implementation, even though both are based on merge sort logic. Timsort, however, is written in C, and includes many optimizations for efficiency.
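For a fuller comparison, NumPy's own `np.sort` is also worth timing: it sorts the array buffer directly in C without creating a Python object per element, whereas `sorted` receives the NumPy array as a sequence of element objects. This extra timing is not part of the original benchmark, just an illustration:
```
import numpy as np
data = np.random.randint(0, 100, 10000)
# Built-in sorted() compares element objects one by one.
%timeit sorted(data)
# np.sort works on the underlying buffer and returns a sorted copy.
%timeit np.sort(data)
```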
---
#### Notebooks
- [Data Collection](./01_data_collection.ipynb)
- [Data Cleaning](./02_data_cleaning.ipynb)
- [Data Preprocessing](./03_data_preprocessing.ipynb)
- [EDA Five States](./04_eda_five_states.ipynb)
- [EDA California](./05_eda_ca.ipynb)
- [EDA Florida](./05_eda_fl.ipynb)
- [EDA Illinois](./05_eda_il.ipynb)
- [EDA New York](./05_eda_ny.ipynb)
- [EDA Texas](./05_eda_tx.ipynb)
- [Modeling Five States](./06_modeling_five_states.ipynb)
- [Modeling California](./07_modeling_ca.ipynb)
- [Modeling Florida](./07_modeling_fl.ipynb)
- [Modeling Illinois](./07_modeling_il.ipynb)
- [Modeling New York](./07_modeling_ny.ipynb)
- [Modeling Texas](./07_modeling_tx.ipynb)
- [Conclusions](./08_conclusions.ipynb)
#### This Notebook's Contents
- [Imports](#Imports)
- [Regression Model: Explainable](#Regression-Model:-Explainable)
- [Regression Model: Best](#Regression-Model:-Best)
- [Classification Model: Best](#Classification-Model:-Best)
# California
# Imports
```
# Import the required libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
plt.style.use('seaborn-whitegrid')
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error, plot_confusion_matrix
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, PowerTransformer, QuantileTransformer
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
# Import the data as a dataframe and save to a variable.
df = pd.read_csv('../data/cleaned_engineered_ca.csv')
# Display the first few rows of the dataframe.
df.head(2)
```
# Regression Model: Explainable
This model achieved an R² score of 0.69.
```
# Assign our features to X and y.
X = df[[
'inc_per_capita_inc_dol',
'obes_percent',
'percent_sex_age_pop_under_5',
'percent_sex_age_pop_5_to_9',
'percent_sex_age_pop_10_to_14',
'percent_sex_age_pop_15_to_19',
'percent_sex_age_pop_20_to_24',
'percent_sex_age_pop_25_to_34',
'percent_sex_age_pop_35_to_44',
'percent_sex_age_pop_45_to_74',
'percent_sex_age_pop_75_and_over',
'percent_sex_age_pop_male',
'percent_health_ins_noninst_pop_cov_no',
'percent_race_pop_white_alone',
'percent_race_pop_hispanic_or_latino_of_any_race',
'percent_race_pop_two_or_more_races',
'pop_density',
'tests_per_100_people',
'percent_inc_hhlds_35_000_to_49_999',
]]
y = df['cases_per_100_people']
# Split X and y into train/test groups.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Display the shape of the training and test data.
X_train.shape, X_test.shape
# Create a pipeline.
pipe = make_pipeline(QuantileTransformer(), SelectKBest(f_regression), Ridge(max_iter=20_000))
# Set parameters for GridSearchCV.
params = {
'selectkbest__k': range(1,20),
'ridge__alpha': [.1, 1, 10, 20]
}
# Set up GridSearchCV with the pipe.
gs = GridSearchCV(pipe, param_grid=params, scoring='r2', cv=10, n_jobs=-1)
# Call fit on the GridSearchCV object and pass the training data.
gs.fit(X_train, y_train)
# Display the best parameters.
gs.best_params_
# Display the score for the test set.
print(f'Test R2 Score: {round(gs.score(X_test, y_test), 4)}.')
# Display the score for the training set.
print(f'Train R2 Score: {round(gs.score(X_train, y_train), 4)}.')
# Calculate the RMSE for the model.
y_preds = gs.predict(X_test)
print(f'RMSE of Model:{round(mean_squared_error(y_test, y_preds, squared=False), 4)}.')
# Plot the predicted versus the actual 'cases_per_100_people'.
plt.figure(figsize=(8,6))
sns.scatterplot(y_preds, y_test)
plt.title('California Actual vs. Predicted Cases/100 People', fontsize=16)
plt.xlabel('Predicted Cases/100 People', fontsize=14)
plt.ylabel('Actual Cases/100 People', fontsize=14);
# Extract a boolean matrix of features selectkbest used in the best model.
features = gs.best_estimator_.named_steps['selectkbest'].get_support()
# Overlay the boolean matrix with the features to get the features used.
features = X_train.columns[features==True]
# Extract the coefficients for the features used in the best model.
coefs = gs.best_estimator_.named_steps['ridge'].coef_
# Create a dataframe of the features and their coefficients.
coef_df = pd.DataFrame(list(zip(features, coefs)), columns=['Feature', 'Coefficients'])
coef_df = coef_df.set_index('Feature')
coef_df = coef_df.sort_values(by='Coefficients', ascending=False)
# Rename the index.
coef_df = coef_df.rename(index={
'inc_per_capita_inc_dol': 'Income Per Capita ($)',
'obes_percent': 'Obesity (%)',
'pop_density': 'Population Density',
'percent_health_ins_noninst_pop_cov_no': 'Health Insurance: None (%)',
'percent_sex_age_pop_male': 'Sex: Male (%)',
'percent_sex_age_pop_under_5': 'Age: Under 5 Yrs (%)',
'percent_sex_age_pop_5_to_9': 'Age: 5-9 Yrs (%)',
'percent_sex_age_pop_10_to_14': 'Age: 10-14 Yrs (%)',
'percent_sex_age_pop_15_to_19': 'Age: 15-19 Yrs (%)',
'percent_sex_age_pop_20_to_24': 'Age: 20-24 Yrs (%)',
'percent_sex_age_pop_25_to_34': 'Age: 25-34 Yrs (%)',
'percent_sex_age_pop_35_to_44': 'Age: 35-44 Yrs (%)',
    'percent_sex_age_pop_45_to_74': 'Age: 45-74 Yrs (%)',
'percent_sex_age_pop_75_and_over': 'Age: 75+ Yrs (%)',
'percent_race_pop_asian_alone': 'Race: Asian (%)',
'percent_race_pop_american_indian_and_alaska_native_alone': 'Race: American Indian/Alaskan (%)',
'percent_race_pop_black_or_african_american_alone': 'Race: Black/African American (%)',
'percent_race_pop_hispanic_or_latino_of_any_race': 'Race: Hispanic/Latino (%)',
'percent_race_pop_two_or_more_races': 'Race: Two or More Races (%)',
'percent_race_pop_white_alone': 'Race: White (%)',
'tests_per_100_people': 'Tests/100 People'
})
# Plot the dataframe of the features and their coefficients.
coef_df.sort_values(by='Coefficients').plot(kind = 'barh', figsize=(6,6), legend=None)
plt.title('California Model Coefficients', fontsize=16)
plt.yticks(fontsize=14);
```
### Create a smaller coefficient dataframe
```
# Extract the top 10 coefficients.
coef_neg = coef_df.sort_values(by='Coefficients')[:5]
coef_pos = coef_df.sort_values(by='Coefficients', ascending=False)[:5]
coef_df = pd.concat([coef_neg, coef_pos]).sort_values(by='Coefficients', ascending=False)
# Plot the dataframe of the features and their coefficients.
coef_df.sort_values(by='Coefficients').plot(kind = 'barh', figsize=(4,4), legend=None)
plt.title('California Model Coefficients', fontsize=16)
plt.yticks(fontsize=14);
```
# Regression Model: Best
This model achieved an R² score of 0.74.
```
# Assign our features to X and y.
X = df[[
'inc_per_capita_inc_dol',
'obes_percent',
'percent_sex_age_pop_under_5',
'percent_sex_age_pop_5_to_9',
'percent_sex_age_pop_10_to_14',
'percent_sex_age_pop_15_to_19',
'percent_sex_age_pop_20_to_24',
'percent_sex_age_pop_25_to_34',
'percent_sex_age_pop_35_to_44',
'percent_sex_age_pop_45_to_74',
'percent_sex_age_pop_75_and_over',
'percent_sex_age_pop_male',
'percent_health_ins_noninst_pop_cov_no',
'percent_race_pop_white_alone',
'percent_race_pop_hispanic_or_latino_of_any_race',
'percent_race_pop_two_or_more_races',
'pop_density',
'tests_per_100_people',
'percent_inc_hhlds_35_000_to_49_999',
]]
y = df['cases_per_100_people']
# Split X and y into train/test groups.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Display the shape of the training and test data.
X_train.shape, X_test.shape
# Create a pipeline.
pipe = make_pipeline(PCA(random_state=22), LinearRegression())
# Set parameters for GridSearchCV.
params = {
'pca__n_components': [5, 10, 20, 50, 100, 200],
}
# Set up GridSearchCV with the pipe.
gs = GridSearchCV(pipe, params, cv=10, n_jobs=-1)
# Call fit on the GridSearchCV object and pass the training data.
gs.fit (X_train, y_train)
# Display the score for the test set.
print(f'Test R2 Score: {round(gs.score(X_test, y_test), 4)}.')
# Display the score for the training set.
print(f'Train R2 Score: {round(gs.score(X_train, y_train), 4)}.')
# Calculate the RMSE for the model.
y_preds = gs.predict(X_test)
print(f'RMSE of Model:{round(mean_squared_error(y_test, y_preds, squared=False), 4)}.')
# Plot the predicted versus the actual 'cases_per_100_people'.
plt.figure(figsize=(8,6))
sns.scatterplot(y_preds, y_test)
plt.title('California Actual vs. Predicted Cases/100 People', fontsize=16)
plt.xlabel('Predicted Cases/100 People', fontsize=14)
plt.ylabel('Actual Cases/100 People', fontsize=14);
```
# Classification Model: Best
This model achieved 93% accuracy against a 66% baseline.
```
# Assign our features to X and y.
X = df[[
'inc_per_capita_inc_dol',
'obes_percent',
'percent_race_pop_hispanic_or_latino_of_any_race',
'percent_sex_age_pop_under_5',
'percent_sex_age_pop_5_to_9',
'percent_sex_age_pop_10_to_14',
'percent_sex_age_pop_15_to_19',
'percent_race_pop_white_alone',
'percent_sex_age_pop_45_to_74',
'percent_sex_age_pop_75_and_over'
]]
y = df['covid_severity']
# Split X and y into train/test groups.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
# Display the shape of the training and test data.
X_train.shape, X_test.shape
# Display the baseline percentages.
y_test.value_counts(normalize=True)
# Create a pipeline.
pipe = make_pipeline(StandardScaler(), SelectKBest(), LogisticRegression())
# Set parameters for GridSearchCV.
params = {
    'selectkbest__k': range(1, 11),  # only 10 candidate features are available here
'logisticregression__C': [.1, .5, 1, 10]
}
# Set up GridSearchCV with the pipe.
gs = GridSearchCV(pipe, param_grid=params, cv=10, n_jobs=-1)
# Call fit on the GridSearchCV object and pass the training data.
gs.fit(X_train, y_train)
# Display the best parameters.
gs.best_params_
# Display the score for the test set.
print(f'Test Accuracy: {round(gs.score(X_test, y_test), 4)}.')
# Display the score for the training set.
print(f'Train Accuracy: {round(gs.score(X_train, y_train), 4)}.')
# Plot a confusion matrix.
y_preds = gs.predict(X_test)
plot_confusion_matrix(gs, X_test, y_test);
```
---
```
import pandas as pd
import numpy as np
```
## Reimbursements
This notebook cross-references the **CEIS** dataset, which gathers companies that have had some type of sanction applied by the federative spheres, with reimbursements related to **CEAP**.
```
reimbursements = pd.read_csv('../data/2016-08-08-current-year.xz',
dtype={'cnpj_cpf': np.str,
'document_id': np.str,
'reimbursement_value': np.float},
parse_dates = ['issue_date'],
low_memory=False)
reimbursements.shape
reimbursements.iloc[1]
```
---
## Inident and Suspended Companies - CEIS
This dataset gathers companies and individuals that have been sanctioned by organs and entities of the public administration across the various federative spheres.
Origin of the information: Controladoria-Geral da União - CGU (Comptroller General of the Union)
```
inident_and_suspended_companies = pd.read_csv('../data/2016-12-21-inident-and-suspended-companies.xz',
dtype={'sanctioned_cnpj_cpf': np.str,
'process_number': np.str},
parse_dates = ['sanction_start_date',
'sanction_end_date',
'data_source_date',
'published_date'],
low_memory=False)
inident_and_suspended_companies.fillna('', inplace=True)
inident_and_suspended_companies['sanction_start_date'] = pd.to_datetime(
inident_and_suspended_companies['sanction_start_date'],
format='%Y-%m-%d')
inident_and_suspended_companies['sanction_end_date'] = pd.to_datetime(
inident_and_suspended_companies['sanction_end_date'],
format='%Y-%m-%d')
inident_and_suspended_companies.shape
inident_and_suspended_companies.iloc[0]
```
#### Querying the reimbursements from suspended companies
Below we have the list of reimbursements whose supplier appears in the CEIS dataset.
```
reimbursements_from_inident_companies = reimbursements.merge(
inident_and_suspended_companies,
left_on='cnpj_cpf',
right_on='sanctioned_cnpj_cpf')
reimbursements_from_inident_companies.shape
```
Now, from the selected reimbursements, we keep only the ones issued while the sanction was in force.
```
suspect_reimbursements = reimbursements_from_inident_companies.query(
'sanction_start_date <= issue_date and \
issue_date <= sanction_end_date')
suspect_reimbursements.shape
suspect_reimbursements.filter([ 'document_id',
'congressperson_name',
'congressperson_id',
'congressperson_document',
'term',
'subquota_description',
'subquota_group_description',
'supplier',
'cnpj_cpf',
'document_number',
'document_type',
'issue_date',
'document_value',
'name_given_by_sanctioning_body',
'company_name_receita_database',
'trading_name_receita_database',
'process_number',
'sanction_type',
'sanction_start_date',
'sanction_end_date',
'sanctioning_body'])
```
The study above doesn't prove anything illegal, but at the very least the congressperson should have researched the company before using its services.
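To get a quick sense of where these reimbursements concentrate, one possible follow-up (hypothetical, not part of the original analysis) is to aggregate the suspect records per congressperson:
```
# Count and total value of suspect reimbursements per congressperson.
summary = (suspect_reimbursements
           .groupby('congressperson_name')['document_value']
           .agg(['count', 'sum'])
           .sort_values('sum', ascending=False))
summary.head(10)
```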
---
# California Single Weekly Predictions, trained on historical flu data and temperature
> Once again, just like before in the USA flu model, I am going to index COVID weekly cases by Wednesdays
```
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
from sklearn import preprocessing
```
### getting historical flu data
```
system = "Windows"
if system == "Windows":
flu_dir = "..\\..\\..\\cdc-fludata\\us_national\\"
else:
flu_dir = "../../../cdc-fludata/us_national/"
flu_dictionary = {}
for year in range(1997, 2019):
filepath = "usflu_"
year_string = str(year) + "-" + str(year + 1)
filepath = flu_dir + filepath + year_string + ".csv"
temp_df = pd.read_csv(filepath)
flu_dictionary[year] = temp_df
```
### combining flu data into one chronological series of total cases
```
# getting total cases and putting them in a series by week
flu_series_dict = {}
for year in flu_dictionary:
temp_df = flu_dictionary[year]
temp_df = temp_df.set_index("WEEK")
abridged_df = temp_df.iloc[:, 2:]
try:
abridged_df = abridged_df.drop(columns="PERCENT POSITIVE")
except:
pass
total_cases_series = abridged_df.sum(axis=1)
flu_series_dict[year] = total_cases_series
all_cases_series = pd.Series(dtype="int64")
for year in flu_series_dict:
temp_series = flu_series_dict[year]
all_cases_series = all_cases_series.append(temp_series, ignore_index=True)
all_cases_series
all_cases_series.plot(grid=True, figsize=(60,20))
```
### Now, making a normalized series between 0 and 1
```
norm_flu_series_dict = {}
for year in flu_series_dict:
temp_series = flu_series_dict[year]
temp_list = preprocessing.minmax_scale(temp_series)
temp_series = pd.Series(temp_list)
norm_flu_series_dict[year] = temp_series
all_cases_norm_series = pd.Series(dtype="int64")
for year in norm_flu_series_dict:
temp_series = norm_flu_series_dict[year]
all_cases_norm_series = all_cases_norm_series.append(temp_series, ignore_index=True)
#all_cases_norm_series.plot(grid=True, figsize=(60,5), )
plt.figure(figsize=(15,5))
plt.plot(all_cases_norm_series)
plt.grid()
plt.tick_params(axis='both', which='major', labelsize=15,length=10,width=3)
plt.xlabel(xlabel='week', size='15')
plt.ylabel(ylabel='weekly new flu cases (normalized)', size='15')
plt.title("Historical United States Flu Data, Normalized by Seasonal Maximums", size='15')
plt.show()
all_cases_norm_series
```
## Getting COVID-19 Case Data
```
if system == "Windows":
datapath = "..\\..\\..\\COVID-19\\csse_covid_19_data\\csse_covid_19_time_series\\"
else:
datapath = "../../../COVID-19/csse_covid_19_data/csse_covid_19_time_series/"
# Choose from "US Cases", "US Deaths", "World Cases", "World Deaths", "World Recoveries"
key = "US Cases"
if key == "US Cases":
datapath = datapath + "time_series_covid19_confirmed_US.csv"
elif key == "US Deaths":
datapath = datapath + "time_series_covid19_deaths_US.csv"
elif key == "World Cases":
datapath = datapath + "time_series_covid19_confirmed_global.csv"
elif key == "World Deaths":
datapath = datapath + "time_series_covid19_deaths_global.csv"
elif key == "World Recoveries":
datapath = datapath + "time_series_covid19_recovered_global.csv"
covid_df = pd.read_csv(datapath)
covid_df
cali_data = covid_df.loc[covid_df["Province_State"] == "California"]
cali_cases = cali_data.iloc[:,11:]
cali_cases_total = cali_cases.sum(axis=0)
cali_cases_total.plot()
```
### convert daily data to weekly data
```
cali_weekly_cases = cali_cases_total.iloc[::7]
cali_weekly_cases
cali_weekly_cases.plot()
```
### Converting cumulative series to non-cumulative series
```
cali_wnew_cases = cali_weekly_cases.diff()
cali_wnew_cases[0] = 1.0
cali_wnew_cases
cali_wnew_cases.plot()
cali_wnew_cases[21]
```
### normalizing weekly case data
> This is going to be different for California, because the peak number of weekly new infections probably has not been reached yet. Instead of min-max scaling, we divide everything by a guess at the peak number of weekly cases.
```
# Guess for the peak number of weekly cases: here we use the weekly value at index 21, inspected above. Could definitely be wrong.
peak_guess = cali_wnew_cases[21]
cali_wnew_cases_norm = cali_wnew_cases / peak_guess
cali_wnew_cases_norm.plot()
cali_wnew_cases_norm
```
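Because the series is divided by `peak_guess` rather than min-max scaled, the values (and later the model's outputs) are in units of "fraction of the assumed peak", so multiplying by the same guess converts back to case counts. A quick round-trip check, just to make the scaling explicit:
```
# Scaling down and back up should recover the original weekly counts.
restored = cali_wnew_cases_norm * peak_guess
print(np.allclose(restored.values, cali_wnew_cases.values))
```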
## getting temperature data
> At the moment, this will be dummy data
```
flu_temp_data = np.full(len(all_cases_norm_series), 0.5)
training_data_df = pd.DataFrame({
"Temperature" : flu_temp_data,
"Flu Cases" : all_cases_norm_series
})
training_data_df
covid_temp_data = np.full(len(cali_wnew_cases_norm), 0.5)
testing_data_df = pd.DataFrame({
"Temperature" : covid_temp_data,
"COVID Cases" : cali_wnew_cases_norm
})
testing_data_df
training_data_np = training_data_df.values
testing_data_np = testing_data_df.values
```
## Building Neural Net Model
### preparing model data
```
# this code is directly from https://www.tensorflow.org/tutorials/structured_data/time_series
# much of below data formatting code is derived straight from same link
def multivariate_data(dataset, target, start_index, end_index, history_size,
target_size, step, single_step=False):
data = []
labels = []
start_index = start_index + history_size
if end_index is None:
end_index = len(dataset) - target_size
for i in range(start_index, end_index):
indices = range(i-history_size, i, step)
data.append(dataset[indices])
if single_step:
labels.append(target[i+target_size])
else:
labels.append(target[i:i+target_size])
return np.array(data), np.array(labels)
past_history = 22
future_target = 0
STEP = 1
x_train_single, y_train_single = multivariate_data(training_data_np, training_data_np[:, 1], 0,
None, past_history,
future_target, STEP,
single_step=True)
x_test_single, y_test_single = multivariate_data(testing_data_np, testing_data_np[:, 1],
0, None, past_history,
future_target, STEP,
single_step=True)
BATCH_SIZE = 300
BUFFER_SIZE = 1000
train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single))
train_data_single = train_data_single.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
test_data_single = tf.data.Dataset.from_tensor_slices((x_test_single, y_test_single))
test_data_single = test_data_single.batch(1).repeat()
```
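To make the windowing concrete: each sample is a 22-week slice of the two columns (temperature and normalized cases), and each label is the single value that follows that slice. A purely illustrative shape check:
```
# Expected shapes: (n_samples, 22, 2) for the inputs, (n_samples,) for the labels.
print(x_train_single.shape, y_train_single.shape)
print(x_test_single.shape, y_test_single.shape)
```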
### designing actual model
```
# creating the neural network model
lstm_prediction_model = tf.keras.Sequential([
tf.keras.layers.LSTM(32, input_shape=x_train_single.shape[-2:]),
tf.keras.layers.Dense(32),
tf.keras.layers.Dense(1)
])
lstm_prediction_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss="mae")
single_step_history = lstm_prediction_model.fit(train_data_single, epochs=10,
steps_per_epoch=250,
validation_data=test_data_single,
validation_steps=50)
def create_time_steps(length):
return list(range(-length, 0))
def show_plot(plot_data, delta, title):
labels = ['History', 'True Future', 'Model Prediction']
marker = ['.-', 'rx', 'go']
time_steps = create_time_steps(plot_data[0].shape[0])
if delta:
future = delta
else:
future = 0
plt.title(title)
for i, x in enumerate(plot_data):
if i:
plt.plot(future, plot_data[i], marker[i], markersize=10,
label=labels[i])
else:
plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i])
plt.legend()
plt.xlim([time_steps[0], (future+5)*2])
plt.xlabel('Week (defined by Wednesdays)')
plt.ylabel('Normalized Cases')
return plt
for x, y in train_data_single.take(10):
#print(lstm_prediction_model.predict(x))
plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(),
lstm_prediction_model.predict(x)[0]], 0,
'Training Data Prediction')
plot.show()
for x, y in test_data_single.take(1):
plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(),
lstm_prediction_model.predict(x)[0]], 0,
'California COVID Case Prediction, Single Week, Max-Normalized')
plot.show()
```
---
# 0.0. Imports
```
import math
import pandas as pd
import inflection
import numpy as np
import seaborn as sns
import datetime
from matplotlib import pyplot as plt
from IPython.display import Image
```
## 0.1. Helper Functions
## 0.2. Loading Data
```
df_sales_raw = pd.read_csv('Dados/rossmann-store-sales/train.csv', low_memory=False)
df_store_raw = pd.read_csv('Dados/rossmann-store-sales/store.csv', low_memory=False)
#merge
df_raw = pd.merge(df_sales_raw,df_store_raw,how='left',on='Store')
df_raw.sample() # Show a random row of the DataFrame
df_raw.columns
```
# 1.0. Data Description
```
df1 = df_raw.copy()
```
## 1.1. Rename Columns
```
df1.columns
cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo','StateHoliday', 'SchoolHoliday',
'StoreType', 'Assortment','CompetitionDistance', 'CompetitionOpenSinceMonth',
'CompetitionOpenSinceYear','Promo2', 'Promo2SinceWeek','Promo2SinceYear', 'PromoInterval']
# Rename
snakecase = lambda x: inflection.underscore(x)
cols_new = list(map(snakecase,cols_old))
df1.columns = cols_new
df1.columns
```
## 1.2. Data Dimensions
```
print('Number of Rows: {}'.format(df1.shape[0]))
print('Number of Cols: {}'.format(df1.shape[1]))
```
## 1.3. Data Types
```
df1['date'] = pd.to_datetime(df1['date'])
df1.dtypes
```
## 1.4. Check NA
```
df1.isna().sum()
```
## 1.5. Fill Out NA
```
df1['competition_distance'].max()
# competition_distance
# Replace NA with a constant far larger than the maximum distance, meaning there is
# effectively no competitor in these cases
df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x)
# Check that the replacement was applied.
df1.isna().sum()
# Check the new maximum value of the column
df1['competition_distance'].max()
# competition_open_since_month
df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month
if math.isnan(x['competition_open_since_month'])
else x['competition_open_since_month'],axis=1)
# competition_open_since_year
df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year
if math.isnan(x['competition_open_since_year'])
else x['competition_open_since_year'],axis=1)
# promo2_since_week
df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week
if math.isnan(x['promo2_since_week'])
else x['promo2_since_week'],axis=1)
# promo2_since_year
df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year
if math.isnan(x['promo2_since_year'])
else x['promo2_since_year'],axis=1)
# promo_interval
# Month abbreviations must match the strings in 'promo_interval' (the Rossmann data uses 'Jan', 'Feb', ..., 'Sept', 'Dec').
month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df1['promo_interval'].fillna(0,inplace=True)
df1['month_map'] = df1['date'].dt.month.map(month_map)
df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)
df1.sample(5).transpose()
df1.isna().sum()
```
## 1.6. Change Types
```
df1.dtypes
# Convert from float to int
df1['competition_distance'] = df1['competition_distance'].astype(int)
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
df1.dtypes
```
## 1.7. Descriptive Statistics
```
num_attributes = df1.select_dtypes(include=['int64','float64'])
cat_attributes = df1.select_dtypes( exclude=['int64', 'float64', 'datetime64[ns]'] )
```
### 1.7.1 Numeric Attributes
```
# Central tendency - mean and median
ct1 = pd.DataFrame(num_attributes.apply(np.mean)).T
ct2 = pd.DataFrame(num_attributes.apply(np.median)).T
# Dispersion - std, min, max, range, skew, kurtosis
disp1 = pd.DataFrame(num_attributes.apply(np.std)).T
disp2 = pd.DataFrame(num_attributes.apply(min ) ).T
disp3 = pd.DataFrame(num_attributes.apply(max ) ).T
disp4 = pd.DataFrame(num_attributes.apply(lambda x: x.max() - x.min())).T
disp5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() ) ).T
disp6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() ) ).T
# Concatenate
metrica = pd.concat( [disp2, disp3, disp4, ct1, ct2, disp1, disp5, disp6] ).T.reset_index()
metrica.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']
metrica
sns.displot( df1['sales'], kde=False )
```
### 1.7.2 Categorical Attributes
```
cat_attributes.apply( lambda x: x.unique().shape[0] )
aux = df1[(df1['state_holiday'] != '0') & (df1['sales'] > 0)]
plt.subplot( 1, 3, 1 )
sns.boxplot( x='state_holiday', y='sales', data=aux )
plt.subplot( 1, 3, 2 )
sns.boxplot( x='store_type', y='sales', data=aux )
plt.subplot( 1, 3, 3 )
sns.boxplot( x='assortment', y='sales', data=aux )
```
# 2.0. Feature Engineering
```
df2 = df1.copy()
```
## 2.1. Hypothesis Mind Map
```
Image('img/MindMapHypothesis.png')
```
## 2.2. Hypothesis Creation
### 2.2.1 Store-Related Hypotheses
**1.** Stores with more employees should sell more.
**2.** Stores with larger stock capacity should sell more.
**3.** Larger stores should sell more.
**4.** Stores with a larger assortment should sell more.
**5.** Stores with closer competitors should sell less.
**6.** Stores with longer-established competitors should sell more.
### 2.2.2 Product-Related Hypotheses
**1.** Stores that invest more in marketing should sell more.
**2.** Stores with greater product exposure should sell more.
**3.** Stores with lower-priced products should sell more.
**5.** Stores with more aggressive promotions (bigger discounts) should sell more.
**6.** Stores with promotions active for longer should sell more.
**7.** Stores with more promotion days should sell more.
**8.** Stores with more consecutive promotions should sell more.
### 2.2.3 Time-Related Hypotheses
**1.** Stores open during the Christmas holiday should sell more.
**2.** Stores should sell more over the years.
**3.** Stores should sell more in the second half of the year.
**4.** Stores should sell more after the 10th of each month.
**5.** Stores should sell less on weekends.
**6.** Stores should sell less during school holidays.
## 2.3 Final List of Hypotheses
**1.** Stores with a larger assortment should sell more.
**2.** Stores with closer competitors should sell less.
**3.** Stores with longer-established competitors should sell more.
**4.** Stores with promotions active for longer should sell more.
**5.** Stores with more promotion days should sell more.
**7.** Stores with more consecutive promotions should sell more.
**8.** Stores open during the Christmas holiday should sell more.
**9.** Stores should sell more over the years.
**10.** Stores should sell more in the second half of the year.
**11.** Stores should sell more after the 10th of each month.
**12.** Stores should sell less on weekends.
**13.** Stores should sell less during school holidays.
## 2.4. Feature Engineering
```
# year
df2['year'] = df2['date'].dt.year
# month
df2['month'] = df2['date'].dt.month
# day
df2['day'] = df2['date'].dt.day
# week of year
#df2['week_of_year'] = df2['date'].dt.weekofyear
# year week
#df2['year_week'] = df2['date'].dt.strftime( '%Y-%W' )
# competition since
df2['competition_since'] = df2.apply( lambda x: datetime.datetime( year=x['competition_open_since_year'], month=x['competition_open_since_month'],day=1 ), axis=1 )
df2['competition_time_month'] = ( ( df2['date'] - df2['competition_since'] )/30 ).apply( lambda x: x.days ).astype( int )
# promo since
df2['promo_since'] = df2['promo2_since_year'].astype( str ) + '-' + df2['promo2_since_week'].astype( str )
df2['promo_since'] = df2['promo_since'].apply( lambda x: datetime.datetime.strptime( x + '-1', '%Y-%W-%w' ) - datetime.timedelta( days=7 ) )
df2['promo_time_week'] = ( ( df2['date'] - df2['promo_since'] )/7 ).apply( lambda x: x.days ).astype( int )
# assortment
df2['assortment'] = df2['assortment'].apply( lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended' )
# state holiday
df2['state_holiday'] = df2['state_holiday'].apply( lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular_day' )
df2.head().T
```
---
```
import matplotlib.pyplot as plt
import pandas as pd
from zipline import run_algorithm
from zipline.api import order_target_percent, record, symbol
from datetime import datetime
import pytz
%matplotlib inline
def initialize(context):
dji = ['AAPL',
'AXP',
'BA',
'CAT',
'CSCO',
'CVX',
'DIS',
'DWDP', # ? "DD" or "GE"
'GS',
'HD',
'IBM',
'INTC',
'JNJ',
'JPM',
'KO',
'MCD',
'MMM',
'MRK',
'MSFT',
'NKE',
'PFE',
'PG',
'TRV',
'UNH',
'UTX', #RTX
'V',
'VZ',
'WBA',
'WMT',
'XOM']
# Make a list of symbols from the list of tickers
context.dji_symbols = [symbol(s) for s in dji]
# Moving average window
context.index_average_window = 100
# Set zipline benchmark to false
context.set_benchmark(False)
def handle_data(context, data):
# Get history of all the stock
stock_hist = data.history(context.dji_symbols, 'close', context.index_average_window, '1d')
# Make empty DataFrame to start with
stock_analytics = pd.DataFrame()
# Add column for above or below average
    stock_analytics['above_mean'] = stock_hist.iloc[-1] > stock_hist.mean()
# Set weights for stocks to buy
stock_analytics.loc[stock_analytics['above_mean'] == True, 'weight'] = 1/len(context.dji_symbols)
# Set zero weights for the rest
stock_analytics.loc[stock_analytics['above_mean'] == False, 'weight'] = 0.0
# Iterate each row and place trades
    for stock, analytics in stock_analytics.iterrows():
        # Check if the stock can be traded
if data.can_trade(stock):
# Place the trade
order_target_percent(stock, analytics['weight'])
def analyze(context, perf):
fig = plt.figure(figsize=(12, 8))
# First chart
ax1 = fig.add_subplot(311)
ax1.set_title('Strategy results')
    ax1.plot(perf['portfolio_value'], linestyle='-', label='Equity Curve', linewidth=3.0)
ax1.legend()
ax1.grid(False)
# Second chart
ax2 = fig.add_subplot(312)
    ax2.plot(perf['gross_leverage'], linestyle='-', linewidth=1.0)
ax2.legend()
ax2.grid(True)
# Third chart
ax3 = fig.add_subplot(313)
ax3.plot(perf['returns'], label='Returns', linestyle='-.', linewidth=1.0)
ax3.legend()
ax3.grid(True)
# Set start and end dates
#start = pd.to_datetime('2004-1-1', utc=True)
#end = pd.to_datetime('2005-12-31', utc=True)
start = pd.Timestamp('2002-1-1', tz='utc')
end = pd.Timestamp('2017-12-31', tz='utc')
# Run the backtest and get the results
results = run_algorithm(start=start,
end=end,
initialize=initialize,
analyze=analyze,
handle_data=handle_data,
capital_base=10000,
data_frequency='daily', bundle='quandl'
)
```
|
github_jupyter
|
import matplotlib.pyplot as plt
import pandas as pd
from zipline import run_algorithm
from zipline.api import order_target_percent, record, symbol
from datetime import datetime
import pytz
%matplotlib inline
def initialize(context):
dji = ['AAPL',
'AXP',
'BA',
'CAT',
'CSCO',
'CVX',
'DIS',
'DWDP', # ? "DD" or "GE"
'GS',
'HD',
'IBM',
'INTC',
'JNJ',
'JPM',
'KO',
'MCD',
'MMM',
'MRK',
'MSFT',
'NKE',
'PFE',
'PG',
'TRV',
'UNH',
'UTX', #RTX
'V',
'VZ',
'WBA',
'WMT',
'XOM']
# Make a list of symbols from the list of tickers
context.dji_symbols = [symbol(s) for s in dji]
# Moving average window
context.index_average_window = 100
# Set zipline benchmark to false
context.set_benchmark(False)
def handle_data(context, data):
# Get history of all the stock
stock_hist = data.history(context.dji_symbols, 'close', context.index_average_window, '1d')
# Make empty DataFrame to start with
stock_analytics = pd.DataFrame()
# Add column for above or below average
    stock_analytics['above_mean'] = stock_hist.iloc[-1] > stock_hist.mean()
# Set weights for stocks to buy
stock_analytics.loc[stock_analytics['above_mean'] == True, 'weight'] = 1/len(context.dji_symbols)
# Set zero weights for the rest
stock_analytics.loc[stock_analytics['above_mean'] == False, 'weight'] = 0.0
# Iterate each row and place trades
    for stock, analytics in stock_analytics.iterrows():
        # Check if the stock can be traded
if data.can_trade(stock):
# Place the trade
order_target_percent(stock, analytics['weight'])
def analyze(context, perf):
fig = plt.figure(figsize=(12, 8))
# First chart
ax1 = fig.add_subplot(311)
ax1.set_title('Strategy results')
    ax1.plot(perf['portfolio_value'], linestyle='-', label='Equity Curve', linewidth=3.0)
ax1.legend()
ax1.grid(False)
# Second chart
ax2 = fig.add_subplot(312)
    ax2.plot(perf['gross_leverage'], linestyle='-', linewidth=1.0)
ax2.legend()
ax2.grid(True)
# Third chart
ax3 = fig.add_subplot(313)
ax3.plot(perf['returns'], label='Returns', linestyle='-.', linewidth=1.0)
ax3.legend()
ax3.grid(True)
# Set start and end dates
#start = pd.to_datetime('2004-1-1', utc=True)
#end = pd.to_datetime('2005-12-31', utc=True)
start = pd.Timestamp('2002-1-1', tz='utc')
end = pd.Timestamp('2017-12-31', tz='utc')
# Run the backtest and get the results
results = run_algorithm(start=start,
end=end,
initialize=initialize,
analyze=analyze,
handle_data=handle_data,
capital_base=10000,
data_frequency='daily', bundle='quandl'
)
| 0.606032 | 0.418192 |
<a href="https://colab.research.google.com/github/google/jax-md/blob/main/notebooks/sand_castle.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Imports & Utils
!pip install jax-md
!wget https://raw.githubusercontent.com/google/jax-md/main/examples/models/sand_castle.png
import imageio
import jax.numpy as jnp
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'svg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style='white')
background_color = [56 / 256] * 3
def make_from_image(filename, size_in_pixels):
position = []
angle = []
color = []
img = imageio.imread(filename)
scale = 2**(1/6)
ratio = jnp.sqrt(1 - 0.25)
for i, y in enumerate(range(0, img.shape[0], size_in_pixels)):
for x in range(0, img.shape[1], size_in_pixels):
r, g, b, a = img[y, x]
if a == 255:
hshift = size_in_pixels * (i % 2) / 2.0
position += [[scale * (x + hshift) / size_in_pixels, scale * (img.shape[0] - y) / size_in_pixels * ratio]]
color += [[r / 255, g / 255, b / 255]]
img_size = jnp.array(img.shape[:2]).T / size_in_pixels * scale
box_size = jnp.max(img_size) * 1.5
position = jnp.array(position, jnp.float64) + box_size / 2.0 - img_size / 2
color = jnp.array(color, jnp.float64)
return box_size, position, color
```
# Sand Castle
In this demo we simulate a sand castle and then demolish it using a projectile.
## Load the sand castle
```
box, positions, colors = make_from_image('sand_castle.png', 24)
from jax_md.colab_tools import renderer
renderer.render(box,
renderer.Disk(positions, color=colors))
print(f'There are {len(positions)} grains.')
```
## Spaces
```
from jax_md import space
displacement_fn, shift_fn = space.periodic(box)
positions[0]
displacement_fn(positions[0], positions[-1])
shift_fn(positions[0], jnp.array([10.0, 0.0]))
```
## Energy
"Energy" in Physics plays a similar role to "Loss" in machine learning.
Write down an energy function between two grains of sand, $\epsilon(r)$.
The total energy will be the sum of all pairs of energies.
$$E = \sum_{i,j} \epsilon(r_{ij})$$
where $r_{ij}$ is the distance between grain $i$ and grain $j$.
We want to model wet sand:
* Grains are hard (no interpenetration).
* Grains stick together a little bit.
* Grains far away from one another don't notice each other.
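To make this concrete, below is a minimal NumPy sketch of the classic 12-6 Lennard-Jones form that has exactly these properties; the exact normalisation used by `energy.lennard_jones` in JAX MD may differ, so treat this as an illustration of the shape of $\epsilon(r)$ rather than the library's implementation.
```
import numpy as np

def lennard_jones(r, sigma=1.0, epsilon=1.0):
    # Classic 12-6 Lennard-Jones potential: strong short-range repulsion,
    # a shallow attractive well near r ~ 2^(1/6) * sigma, and ~0 at large r.
    sr6 = (sigma / r) ** 6
    return 4.0 * epsilon * (sr6 ** 2 - sr6)

r = np.linspace(0.95, 2.5, 5)
print(lennard_jones(r))
```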
```
from jax_md import energy
rs = jnp.linspace(0.5, 2.5)
plt.plot(rs, energy.lennard_jones(rs))
plt.ylim([-1, 1])
plt.xlim([0, 2.5])
plt.xlabel('$r_{ij}$')
plt.ylabel('$\\epsilon$')
sand_energy = energy.lennard_jones_pair(displacement_fn)
sand_energy(positions)
```
## Simulate
```
from jax import random
simulation_steps = 10000
write_every = 50
key = random.PRNGKey(1)
from jax_md import simulate
from jax import jit
init_fn, step_fn = simulate.nvt_langevin(sand_energy, shift_fn, dt=5e-3, kT=0.0, gamma=1e-2)
sand = init_fn(key, positions)
step_fn = jit(step_fn)
trajectory = []
for i in range(simulation_steps):
if i % write_every == 0:
trajectory += [sand.position]
sand = step_fn(sand)
trajectory = jnp.stack(trajectory)
renderer.render(box, renderer.Disk(trajectory, color=colors))
```
## Simulate slightly faster...
```
from jax import lax
def simulation_fn(i, sand_trajectory):
sand, trajectory = sand_trajectory
trajectory = trajectory.at[i].set(sand.position)
sand = lax.fori_loop(0, write_every, lambda _, s: step_fn(s), sand)
return sand, trajectory
write_steps = simulation_steps // write_every
n = positions.shape[0]
sand = init_fn(random.PRNGKey(0), positions)
trajectory = jnp.zeros((write_steps, n, 2))
sand, trajectory = lax.fori_loop(0, write_steps, simulation_fn, (sand, trajectory))
renderer.render(box, renderer.Disk(trajectory, color=colors))
```
## Let's blow it up!
### The projectile
```
projectile = jnp.array([1.0, box / 3.0])
radius = jnp.array(2.0)
strength = 1000.0
velocity = jnp.array([3e-2, 0.0])
```
Model the projectile by adding a term to the energy,
$$E = \sum_{i,j}\epsilon(r_{ij}) + \sum_i \epsilon_p(r_{ip})$$
where $r_{ip}$ is the distance between grain $i$ and the projectile.
We want the projectile to only repel the sand (no attraction).
```
from jax_md import energy
rs = jnp.linspace(0.5, 2.5)
plt.plot(rs, energy.lennard_jones(rs))
plt.plot(rs, energy.soft_sphere(rs, epsilon=strength))
plt.ylim([-1, 10])
plt.xlim([0, 2.5])
plt.xlabel('$r_{ij}$')
plt.ylabel('$\\epsilon$')
def projectile_energy(sand, projectile):
distance = jnp.linalg.norm(sand - projectile, axis=-1)
e = energy.soft_sphere(distance, sigma=radius + 1.0, epsilon=strength)
return jnp.sum(e)
def total_energy(sand, projectile, **kwargs):
return sand_energy(sand) + projectile_energy(sand, projectile)
```
### Run the simulation
```
from jax_md import dataclasses
@dataclasses.dataclass
class SandCastle:
sand: simulate.NVTLangevinState
projectile: jnp.ndarray
simulation_steps = 10000
write_every = 50
write_steps = simulation_steps // write_every
from jax_md import simulate
init_fn, step_fn = simulate.nvt_langevin(total_energy, shift_fn, dt=5e-3, kT=0.0)
from jax import lax
def simulation_fn(i, state_trajectory):
state, traj = state_trajectory
traj = SandCastle(
traj.sand.at[i].set(state.sand.position),
traj.projectile.at[i].set(state.projectile)
)
def total_step_fn(_, state):
return SandCastle(
step_fn(state.sand, projectile=state.projectile),
state.projectile + velocity
)
state = lax.fori_loop(0, write_every, total_step_fn, state)
return state, traj
n = positions.shape[0]
state = SandCastle(
init_fn(key, positions, projectile=projectile),
projectile
)
trajectory = SandCastle(
jnp.zeros((write_steps, n, 2)),
jnp.zeros((write_steps, 2))
)
state, trajectory = lax.fori_loop(0, write_steps, simulation_fn, (state, trajectory))
renderer.render(
box,
{
'sand': renderer.Disk(trajectory.sand, color=colors),
'projectile': renderer.Disk(trajectory.projectile[:, None, :],
diameter=radius * 2)
}
)
```
## Scaling Up
So far at each step we have been computing the interaction between every pair of grains.
But grains that are far apart don't affect each other.
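A rough NumPy sketch of this idea (an illustration only, not how JAX MD implements it): with a short-ranged potential, only pairs closer than some cutoff actually need to be evaluated, and that is precisely what a neighbor list exploits.
```
import numpy as np

np.random.seed(0)
pos = np.random.uniform(0.0, 50.0, size=(200, 2))
cutoff = 3.0

# Computing all pairwise distances costs O(N^2) ...
diff = pos[:, None, :] - pos[None, :, :]
dist = np.linalg.norm(diff, axis=-1)

# ... but only a small fraction of pairs falls within the cutoff.
within = (dist < cutoff) & (dist > 0.0)
print(within.sum() // 2, "interacting pairs out of", pos.shape[0] * (pos.shape[0] - 1) // 2)
```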
```
box, positions, colors = make_from_image('sand_castle.png', 6)
len(positions)
from jax_md.colab_tools import renderer
renderer.render(box, renderer.Disk(positions, color=colors))
displacement_fn, shift_fn = space.periodic(box)
```
### Neighbor lists
```
neighbor_fn, sand_energy = energy.lennard_jones_neighbor_list(displacement_fn, box)
nbrs = neighbor_fn.allocate(positions)
nbrs.idx.shape
def total_energy(sand, projectile, neighbor, **kwargs):
return sand_energy(sand, neighbor) + projectile_energy(sand, projectile)
```
### Simulation
```
simulation_steps = 30000
write_every = 400
write_steps = simulation_steps // write_every
projectile = jnp.array([1.0, box / 3.0])
radius = jnp.array(8.0)
from jax_md import partition
@dataclasses.dataclass
class SandCastle:
sand: simulate.NVTLangevinState
projectile: jnp.ndarray
neighbor: partition.NeighborList
from jax_md import simulate
init_fn, step_fn = simulate.nvt_langevin(total_energy, shift_fn, dt=5e-3, kT=0.0, gamma=1e-2)
from jax import lax
def simulation_fn(i, state_trajectory):
state, traj = state_trajectory
traj = SandCastle(
traj.sand.at[i].set(state.sand.position),
traj.projectile.at[i].set(state.projectile),
None
)
def total_step_fn(_, state):
sand = step_fn(state.sand,
projectile=state.projectile,
neighbor=state.neighbor)
projectile = state.projectile + velocity
neighbor = state.neighbor.update(state.sand.position)
return SandCastle(sand, projectile, neighbor)
state = lax.fori_loop(0, write_every, total_step_fn, state)
return state, traj
n = positions.shape[0]
state = SandCastle(
init_fn(random.PRNGKey(0), positions, projectile=projectile, neighbor=nbrs),
projectile,
nbrs
)
trajectory = SandCastle(
jnp.zeros((write_steps, n, 2)),
jnp.zeros((write_steps, 2)),
None
)
state, trajectory = lax.fori_loop(0, write_steps, simulation_fn, (state, trajectory))
state.neighbor.did_buffer_overflow
renderer.render(
box,
{
'sand': renderer.Disk(trajectory.sand, color=colors),
'projectile': renderer.Disk(trajectory.projectile[:, None, :],
diameter=radius * 2)
},
buffer_size=10
)
```
|
github_jupyter
|
#@title Imports & Utils
!pip install jax-md
!wget https://raw.githubusercontent.com/google/jax-md/main/examples/models/sand_castle.png
import imageio
import jax.numpy as jnp
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'svg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style='white')
background_color = [56 / 256] * 3
def make_from_image(filename, size_in_pixels):
position = []
angle = []
color = []
img = imageio.imread(filename)
scale = 2**(1/6)
ratio = jnp.sqrt(1 - 0.25)
for i, y in enumerate(range(0, img.shape[0], size_in_pixels)):
for x in range(0, img.shape[1], size_in_pixels):
r, g, b, a = img[y, x]
if a == 255:
hshift = size_in_pixels * (i % 2) / 2.0
position += [[scale * (x + hshift) / size_in_pixels, scale * (img.shape[0] - y) / size_in_pixels * ratio]]
color += [[r / 255, g / 255, b / 255]]
img_size = jnp.array(img.shape[:2]).T / size_in_pixels * scale
box_size = jnp.max(img_size) * 1.5
position = jnp.array(position, jnp.float64) + box_size / 2.0 - img_size / 2
color = jnp.array(color, jnp.float64)
return box_size, position, color
box, positions, colors = make_from_image('sand_castle.png', 24)
from jax_md.colab_tools import renderer
renderer.render(box,
renderer.Disk(positions, color=colors))
print(f'There are {len(positions)} grains.')
from jax_md import space
displacement_fn, shift_fn = space.periodic(box)
positions[0]
displacement_fn(positions[0], positions[-1])
shift_fn(positions[0], jnp.array([10.0, 0.0]))
from jax_md import energy
rs = jnp.linspace(0.5, 2.5)
plt.plot(rs, energy.lennard_jones(rs))
plt.ylim([-1, 1])
plt.xlim([0, 2.5])
plt.xlabel('$r_{ij}$')
plt.ylabel('$\\epsilon$')
sand_energy = energy.lennard_jones_pair(displacement_fn)
sand_energy(positions)
from jax import random
simulation_steps = 10000
write_every = 50
key = random.PRNGKey(1)
from jax_md import simulate
from jax import jit
init_fn, step_fn = simulate.nvt_langevin(sand_energy, shift_fn, dt=5e-3, kT=0.0, gamma=1e-2)
sand = init_fn(key, positions)
step_fn = jit(step_fn)
trajectory = []
for i in range(simulation_steps):
if i % write_every == 0:
trajectory += [sand.position]
sand = step_fn(sand)
trajectory = jnp.stack(trajectory)
renderer.render(box, renderer.Disk(trajectory, color=colors))
from jax import lax
def simulation_fn(i, sand_trajectory):
sand, trajectory = sand_trajectory
trajectory = trajectory.at[i].set(sand.position)
sand = lax.fori_loop(0, write_every, lambda _, s: step_fn(s), sand)
return sand, trajectory
write_steps = simulation_steps // write_every
n = positions.shape[0]
sand = init_fn(random.PRNGKey(0), positions)
trajectory = jnp.zeros((write_steps, n, 2))
sand, trajectory = lax.fori_loop(0, write_steps, simulation_fn, (sand, trajectory))
renderer.render(box, renderer.Disk(trajectory, color=colors))
projectile = jnp.array([1.0, box / 3.0])
radius = jnp.array(2.0)
strength = 1000.0
velocity = jnp.array([3e-2, 0.0])
from jax_md import energy
rs = jnp.linspace(0.5, 2.5)
plt.plot(rs, energy.lennard_jones(rs))
plt.plot(rs, energy.soft_sphere(rs, epsilon=strength))
plt.ylim([-1, 10])
plt.xlim([0, 2.5])
plt.xlabel('$r_{ij}$')
plt.ylabel('$\\epsilon$')
def projectile_energy(sand, projectile):
distance = jnp.linalg.norm(sand - projectile, axis=-1)
e = energy.soft_sphere(distance, sigma=radius + 1.0, epsilon=strength)
return jnp.sum(e)
def total_energy(sand, projectile, **kwargs):
return sand_energy(sand) + projectile_energy(sand, projectile)
from jax_md import dataclasses
@dataclasses.dataclass
class SandCastle:
sand: simulate.NVTLangevinState
projectile: jnp.ndarray
simulation_steps = 10000
write_every = 50
write_steps = simulation_steps // write_every
from jax_md import simulate
init_fn, step_fn = simulate.nvt_langevin(total_energy, shift_fn, dt=5e-3, kT=0.0)
from jax import lax
def simulation_fn(i, state_trajectory):
state, traj = state_trajectory
traj = SandCastle(
traj.sand.at[i].set(state.sand.position),
traj.projectile.at[i].set(state.projectile)
)
def total_step_fn(_, state):
return SandCastle(
step_fn(state.sand, projectile=state.projectile),
state.projectile + velocity
)
state = lax.fori_loop(0, write_every, total_step_fn, state)
return state, traj
n = positions.shape[0]
state = SandCastle(
init_fn(key, positions, projectile=projectile),
projectile
)
trajectory = SandCastle(
jnp.zeros((write_steps, n, 2)),
jnp.zeros((write_steps, 2))
)
state, trajectory = lax.fori_loop(0, write_steps, simulation_fn, (state, trajectory))
renderer.render(
box,
{
'sand': renderer.Disk(trajectory.sand, color=colors),
'projectile': renderer.Disk(trajectory.projectile[:, None, :],
diameter=radius * 2)
}
)
box, positions, colors = make_from_image('sand_castle.png', 6)
len(positions)
from jax_md.colab_tools import renderer
renderer.render(box, renderer.Disk(positions, color=colors))
displacement_fn, shift_fn = space.periodic(box)
neighbor_fn, sand_energy = energy.lennard_jones_neighbor_list(displacement_fn, box)
nbrs = neighbor_fn.allocate(positions)
nbrs.idx.shape
def total_energy(sand, projectile, neighbor, **kwargs):
return sand_energy(sand, neighbor) + projectile_energy(sand, projectile)
simulation_steps = 30000
write_every = 400
write_steps = simulation_steps // write_every
projectile = jnp.array([1.0, box / 3.0])
radius = jnp.array(8.0)
from jax_md import partition
@dataclasses.dataclass
class SandCastle:
sand: simulate.NVTLangevinState
projectile: jnp.ndarray
neighbor: partition.NeighborList
from jax_md import simulate
init_fn, step_fn = simulate.nvt_langevin(total_energy, shift_fn, dt=5e-3, kT=0.0, gamma=1e-2)
from jax import lax
def simulation_fn(i, state_trajectory):
state, traj = state_trajectory
traj = SandCastle(
traj.sand.at[i].set(state.sand.position),
traj.projectile.at[i].set(state.projectile),
None
)
def total_step_fn(_, state):
sand = step_fn(state.sand,
projectile=state.projectile,
neighbor=state.neighbor)
projectile = state.projectile + velocity
neighbor = state.neighbor.update(state.sand.position)
return SandCastle(sand, projectile, neighbor)
state = lax.fori_loop(0, write_every, total_step_fn, state)
return state, traj
n = positions.shape[0]
state = SandCastle(
init_fn(random.PRNGKey(0), positions, projectile=projectile, neighbor=nbrs),
projectile,
nbrs
)
trajectory = SandCastle(
jnp.zeros((write_steps, n, 2)),
jnp.zeros((write_steps, 2)),
None
)
state, trajectory = lax.fori_loop(0, write_steps, simulation_fn, (state, trajectory))
state.neighbor.did_buffer_overflow
renderer.render(
box,
{
'sand': renderer.Disk(trajectory.sand, color=colors),
'projectile': renderer.Disk(trajectory.projectile[:, None, :],
diameter=radius * 2)
},
buffer_size=10
)
| 0.583441 | 0.914482 |
# SUMMARY SHEET: DIVIDE AND CONQUER
The "divide and conquer" paradigm is one of the major topics for the baccalauréat exam.
## I) The principle
The 15 players of a volleyball team each have the list of the team's players with their phone numbers. The captain learns that the next match has been rescheduled. All the other players must be informed.
### Solution 1:
The captain calls every other player herself, spending 5 minutes on the phone with each of them. Question: how long (call it t1) does it take for the whole team to be informed? Deduce the complexity of this solution as a function of n (the team size).
### Solution 2:
A more efficient solution, and a more comfortable one for the captain, is to split the list of players into two halves. She calls the first player of each of the two sublists, gives them the news that the match has been rescheduled, and asks them to do the same in turn: split the half-list they belong to in two, call the first player of each part, and so on... until there is nobody left to inform.
Let us draw the call tree for the list of 15 players numbered 1 to 15.
<img src="https://allophysique.com/docs/nsi/algorithmes/images/page5_appels.png" width=500>
Question: assuming a phone call lasts 5 minutes, how long (call it t2) does it take for the whole team to be informed? Deduce the complexity of this solution as a function of n (the team size).
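A small Python sketch of one way to compare the two strategies (this is an added illustration, not the official answer of this sheet; the exact count for solution 2 depends on how the calls at each level overlap):
```
import math

call_minutes = 5
n = 15  # team size

# Solution 1: the captain calls the other n-1 players one after the other -> O(n)
t1 = (n - 1) * call_minutes

# Solution 2: the set of informed players roughly doubles at each round of calls -> O(log n)
rounds = math.ceil(math.log2(n))
t2 = rounds * call_minutes

print(t1, "minutes with solution 1")
print(t2, "minutes (roughly) with solution 2")
```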
## II) Simple classic examples:
### 1) Binary search (dichotomy)
We have a sorted array of size n and want to know whether a value v is present in it.
```
import random
n=100
t = [random.randint(0,1000) for i in range(n)]
t.sort()
print(t)
```
### Naive method: scan the array until the element is found
What is the complexity of this algorithm?
```
def recherche(v,t):
""" entree : v valeur, t tableau
sortie : booleen qui dit si v appartient à t"""
# à coder
```
### Binary search method:
We go through a recursive helper function
`dicho(v,t,imin,imax)` which searches for v between indices imin and imax.
What is the complexity of this algorithm?
```
def dicho(v,t,imin,imax):
""" entree : v valeur, t tableau, imin indice min , imax indicemax où cherché
sortie : booleen qui dit si v appartient à t entre les indices ci-dessus"""
# à coder
```
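One possible recursive solution, added here as a sketch (the exercise itself asks you to write it); the complexity is O(log n) because the search interval is halved at every call:
```
def dicho(v, t, imin, imax):
    # Empty interval: v is not in t
    if imin > imax:
        return False
    m = (imin + imax) // 2
    if t[m] == v:
        return True
    elif t[m] < v:
        return dicho(v, t, m + 1, imax)   # search the right half
    else:
        return dicho(v, t, imin, m - 1)   # search the left half

print(dicho(7, [1, 3, 7, 9, 12], 0, 4))   # True
print(dicho(8, [1, 3, 7, 9, 12], 0, 4))   # False
```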
## 2) Fast exponentiation
We want to compute x^n = x*x*x*x ....*x (n times)
### Naive method (but recursive!)
What is the complexity of this algorithm?
```
def exp(x,n):
"""entrée : x réel, n entier positif
sortie : la valeur de x^n
"""
# à coder en récursif !
```
### méthode "diviser pour régner"
on utilise le fait que x^6 = x^3 * x^3
on fera attention à la parité de n .
Quelle est la complexité de cet algorithme ?
```
def exp2(x,n):
"""entrée : x réel, n entier positif
sortie : la valeur de x^n
"""
# à coder en récursif !
```
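One possible "divide and conquer" solution, added as a sketch (not the official answer); the exponent is halved at every call, hence O(log n) multiplications:
```
def exp2(x, n):
    if n == 0:
        return 1
    half = exp2(x, n // 2)   # x^(n//2), computed only once
    if n % 2 == 0:
        return half * half
    else:
        return half * half * x

print(exp2(2, 10))   # 1024
```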
### 3) A less classic example:
We look for the maximum in an unsorted array of n values.
```
import random
n=100
t = [random.randint(0,1000) for i in range(n)]
```
### Naive approach: scan the array linearly for the maximum
```
def maxi(t):
"""entrée :t tableau non vide
sortie : la valeur max
"""
# à coder
```
### approche "diviser pour régner".
On découpe le tableau en 2 et on compare les 2 max obtenus.
```
def maxi2(t,imin,imax):
"""entrée :t tableau non vide, imin et imax deux indices
sortie : la valeur max entre imin et imax
"""
```
## 4) The great classic of "divide and conquer": merge sort
```
import random
n=100
t = [random.randint(0,1000) for i in range(n)]
```
Reminder:
1) we have a `fusion` function that merges two sorted lists
2) we split the array in two and sort the two parts separately (the results can then be merged)
```
def fusion(t1,t2):
"""entrée : t1 et t2 deux tableaux triés
sortie : un tableau contenant les valeurs triées de t1 et t2"""
# à coder
def tri_fusion(t):
"""entree: t tableau non vide
sortie : tableau trié
"""
# à coder
```
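One possible implementation of the two functions, added as a sketch (variable names follow the stubs above); `fusion` runs in O(n1 + n2) and `tri_fusion` in O(n log n):
```
def fusion(t1, t2):
    resultat = []
    i, j = 0, 0
    while i < len(t1) and j < len(t2):
        if t1[i] <= t2[j]:
            resultat.append(t1[i]); i += 1
        else:
            resultat.append(t2[j]); j += 1
    # Append whatever is left of the array that was not exhausted
    return resultat + t1[i:] + t2[j:]

def tri_fusion(t):
    if len(t) <= 1:
        return t
    m = len(t) // 2
    return fusion(tri_fusion(t[:m]), tri_fusion(t[m:]))

print(tri_fusion([5, 2, 9, 1, 7]))   # [1, 2, 5, 7, 9]
```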
|
github_jupyter
|
import random
n=100
t = [random.randint(0,1000) for i in range(n)]
t.sort()
print(t)
def recherche(v,t):
    """ input: v a value, t an array
    output: boolean indicating whether v belongs to t"""
    # to be coded
def dicho(v,t,imin,imax):
    """ input: v a value, t an array, imin and imax the bounds of the search interval
    output: boolean indicating whether v belongs to t between these indices"""
    # to be coded
def exp(x,n):
    """input: x a real number, n a non-negative integer
    output: the value of x^n
    """
    # to be coded recursively!
def exp2(x,n):
    """input: x a real number, n a non-negative integer
    output: the value of x^n
    """
    # to be coded recursively!
import random
n=100
t = [random.randint(0,1000) for i in range(n)]
def maxi(t):
    """input: t a non-empty array
    output: the maximum value
    """
    # to be coded
def maxi2(t,imin,imax):
    """input: t a non-empty array, imin and imax two indices
    output: the maximum value between imin and imax
    """
import random
n=100
t = [random.randint(0,1000) for i in range(n)]
def fusion(t1,t2):
    """input: t1 and t2 two sorted arrays
    output: an array containing the values of t1 and t2, sorted"""
    # to be coded
def tri_fusion(t):
    """input: t a non-empty array
    output: a sorted array
    """
    # to be coded
| 0.09774 | 0.916633 |
## Target
* The benefit of deep neural network architectures.
* The Stacked LSTM recurrent neural network architecture.
* How to implement stacked LSTMs in Python with Keras.
## Stacked LSTM Architecture
Stacked LSTMs are now a stable technique for challenging sequence prediction problems. A Stacked LSTM architecture can be defined as an LSTM model comprised of multiple LSTM layers. An LSTM layer below provides a sequence output rather than a single value output to the LSTM layer above it. Specifically, one output per input time step, rather than one output time step for all input time steps.
<img src='stacked_lstm_image/architecture_stacked_lstm.png' >
We can easily create Stacked LSTM models with the Keras Python deep learning library.
Each LSTM memory cell requires a 3D input. When an LSTM processes one input sequence of time steps, each memory cell will output a single value for the whole sequence as a 2D array.
We can demonstrate this below with a model that has a single hidden LSTM layer that is also the output layer.
```
# Example of one output for whole sequence
from keras.models import Sequential
from keras.layers import LSTM
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, input_shape=(3,1)))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([0.1, 0.2, 0.3]).reshape((1,3,1))
# make and show prediction
print(model.predict(data))
```
The two dimensions of the output array are: [batch, output].
If we feed in a batch of 2 sequences:
```
# Example of one output for whole sequence
from keras.models import Sequential
from keras.layers import LSTM
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, input_shape=(3,1)))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
print(model.predict(data))
```
To stack LSTM layers, we need to change the configuration of the prior LSTM layer to output a 3D array as input for the subsequent layer.
We can do this by setting the return_sequences argument on the layer to True (defaults to False). This will return one output for each input time step and provide a 3D array.
Below is the same example as above with return_sequences=True.
```
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([0.1, 0.2, 0.3]).reshape((1,3,1))
# make and show prediction
print(model.predict(data))
```
Below is an example of defining a 2 hidden layer Stacked LSTM:
```
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(1, return_sequences=False, input_shape=(3,1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([0.1, 0.2, 0.3]).reshape((1,3,1))
# make and show prediction
print(model.predict(data))
```
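A quick way to check how `return_sequences` changes the shapes flowing between the stacked layers is `model.summary()`; below is a minimal sketch (the layer sizes are chosen arbitrarily for illustration):
```
from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
model.add(LSTM(4, return_sequences=True, input_shape=(3, 1)))  # outputs (batch, 3, 4)
model.add(LSTM(2))                                             # outputs (batch, 2)
model.add(Dense(1))                                            # outputs (batch, 1)
model.summary()
```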
**The following is not directly related to the topic; it simply explores the output shape of each layer.**
```
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(1, return_sequences=False, input_shape=(3,1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
outputs = model.predict(data)
print(outputs)
print(outputs.shape)
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(Dense(5))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
outputs = model.predict(data)
print(outputs)
print(outputs.shape)
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(3, return_sequences=True, input_shape=(3,1)))
# model.add(Dense(5))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
outputs = model.predict(data)
print(outputs)
print(outputs.shape)
```
|
github_jupyter
|
# Example of one output for whole sequence
from keras.models import Sequential
from keras.layers import LSTM
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, input_shape=(3,1)))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([0.1, 0.2, 0.3]).reshape((1,3,1))
# make and show prediction
print(model.predict(data))
# Example of one output for whole sequence
from keras.models import Sequential
from keras.layers import LSTM
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, input_shape=(3,1)))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
print(model.predict(data))
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([0.1, 0.2, 0.3]).reshape((1,3,1))
# make and show prediction
print(model.predict(data))
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(1, return_sequences=False, input_shape=(3,1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([0.1, 0.2, 0.3]).reshape((1,3,1))
# make and show prediction
print(model.predict(data))
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(1, return_sequences=False, input_shape=(3,1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
outputs = model.predict(data)
print(outputs)
print(outputs.shape)
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(Dense(5))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
outputs = model.predict(data)
print(outputs)
print(outputs.shape)
# Example of one output for each input time step
from keras.models import Sequential
from keras.layers import LSTM,Dense
from numpy import array
# define model where LSTM is also output layer
model = Sequential()
model.add(LSTM(1, return_sequences=True, input_shape=(3,1)))
model.add(LSTM(3, return_sequences=True, input_shape=(3,1)))
# model.add(Dense(5))
model.compile(optimizer='adam', loss='mse')
# input time steps
data = array([[0.1, 0.2, 0.3],[0.11, 0.21, 0.31]]).reshape((2,3,1))
# make and show prediction
outputs = model.predict(data)
print(outputs)
print(outputs.shape)
| 0.751101 | 0.983629 |
# DataSynthesizer Usage (correlated attribute mode)
> This is a quick demo to use DataSynthesizer in correlated attribute mode.
### Step 1 import packages
```
from DataSynthesizer.DataDescriber import DataDescriber
from DataSynthesizer.DataGenerator import DataGenerator
from DataSynthesizer.ModelInspector import ModelInspector
from DataSynthesizer.lib.utils import read_json_file, display_bayesian_network
import pandas as pd
```
### Step 2 user-defined parameters
```
# input dataset
input_data = './data/adult_ssn.csv'
# location of two output files
mode = 'correlated_attribute_mode'
description_file = f'./out/{mode}/description.json'
synthetic_data = f'./out/{mode}/sythetic_data.csv'
!mkdir -p ./out/{mode}
```
Default values set here, change here if needed.
```
# An attribute is categorical if its domain size is less than this threshold.
# Here modify the threshold to adapt to the domain size of "education" (which is 14 in input dataset).
threshold_value = 20
# specify categorical attributes
categorical_attributes = {'education': True}
# specify which attributes are candidate keys of input dataset.
candidate_keys = {'ssn': True}
# A parameter in Differential Privacy. It roughly means that removing a row in the input dataset will not
# change the probability of getting the same output more than a multiplicative difference of exp(epsilon).
# Increase epsilon value to reduce the injected noises. Set epsilon=0 to turn off differential privacy.
epsilon = 1
# The maximum number of parents in Bayesian network, i.e., the maximum number of incoming edges.
degree_of_bayesian_network = 2
# Number of tuples generated in synthetic dataset.
num_tuples_to_generate = 1000 # The input dataset has 32561 rows; set this to 32561 to match it, or to any other number.
```
### Step 3 DataDescriber
1. Instantiate a DataDescriber.
2. Compute the statistics of the dataset.
3. Save dataset description to a file on local machine.
```
describer = DataDescriber(category_threshold=threshold_value)
describer.describe_dataset_in_correlated_attribute_mode(dataset_file=input_data,
epsilon=epsilon,
k=degree_of_bayesian_network,
attribute_to_is_categorical=categorical_attributes,
attribute_to_is_candidate_key=candidate_keys)
describer.save_dataset_description_to_file(description_file)
display_bayesian_network(describer.bayesian_network)
```
### Step 4 generate synthetic dataset
1. Instantiate a DataGenerator.
2. Generate a synthetic dataset.
3. Save it to local machine.
```
generator = DataGenerator()
generator.generate_dataset_in_correlated_attribute_mode(num_tuples_to_generate, description_file)
generator.save_synthetic_data(synthetic_data)
```
### Step 5 compare the statistics of input and synthetic data (optional)
The synthetic data is already saved in a file by step 4. The ModelInspector is for a quick test on the similarity between input and synthetic datasets.
#### 5.1 instantiate a ModelInspector.
It needs input dataset, synthetic dataset, and attribute description.
```
# Read both datasets using Pandas.
input_df = pd.read_csv(input_data, skipinitialspace=True)
synthetic_df = pd.read_csv(synthetic_data)
# Read attribute description from the dataset description file.
attribute_description = read_json_file(description_file)['attribute_description']
inspector = ModelInspector(input_df, synthetic_df, attribute_description)
```
#### 5.2 compare histograms between input and synthetic datasets.
```
for attribute in synthetic_df.columns:
inspector.compare_histograms(attribute)
```
#### 5.3 compare pairwise mutual information
```
inspector.mutual_information_heatmap()
```
|
github_jupyter
|
from DataSynthesizer.DataDescriber import DataDescriber
from DataSynthesizer.DataGenerator import DataGenerator
from DataSynthesizer.ModelInspector import ModelInspector
from DataSynthesizer.lib.utils import read_json_file, display_bayesian_network
import pandas as pd
# input dataset
input_data = './data/adult_ssn.csv'
# location of two output files
mode = 'correlated_attribute_mode'
description_file = f'./out/{mode}/description.json'
synthetic_data = f'./out/{mode}/sythetic_data.csv'
!mkdir -p ./out/{mode}
# An attribute is categorical if its domain size is less than this threshold.
# Here modify the threshold to adapt to the domain size of "education" (which is 14 in input dataset).
threshold_value = 20
# specify categorical attributes
categorical_attributes = {'education': True}
# specify which attributes are candidate keys of input dataset.
candidate_keys = {'ssn': True}
# A parameter in Differential Privacy. It roughly means that removing a row in the input dataset will not
# change the probability of getting the same output more than a multiplicative difference of exp(epsilon).
# Increase epsilon value to reduce the injected noises. Set epsilon=0 to turn off differential privacy.
epsilon = 1
# The maximum number of parents in Bayesian network, i.e., the maximum number of incoming edges.
degree_of_bayesian_network = 2
# Number of tuples generated in synthetic dataset.
num_tuples_to_generate = 1000 # The input dataset has 32561 rows; set this to 32561 to match it, or to any other number.
describer = DataDescriber(category_threshold=threshold_value)
describer.describe_dataset_in_correlated_attribute_mode(dataset_file=input_data,
epsilon=epsilon,
k=degree_of_bayesian_network,
attribute_to_is_categorical=categorical_attributes,
attribute_to_is_candidate_key=candidate_keys)
describer.save_dataset_description_to_file(description_file)
display_bayesian_network(describer.bayesian_network)
generator = DataGenerator()
generator.generate_dataset_in_correlated_attribute_mode(num_tuples_to_generate, description_file)
generator.save_synthetic_data(synthetic_data)
# Read both datasets using Pandas.
input_df = pd.read_csv(input_data, skipinitialspace=True)
synthetic_df = pd.read_csv(synthetic_data)
# Read attribute description from the dataset description file.
attribute_description = read_json_file(description_file)['attribute_description']
inspector = ModelInspector(input_df, synthetic_df, attribute_description)
for attribute in synthetic_df.columns:
inspector.compare_histograms(attribute)
inspector.mutual_information_heatmap()
| 0.571767 | 0.863046 |
<a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/20_solo/Training%20and%20Inference%20Example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Installation
- Run these commands
- git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
- cd Monk_Object_Detection/20_solo/installation
- Select the right file and run
- chmod +x install.sh && ./install.sh
```
! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
! cd Monk_Object_Detection/20_solo/installation && chmod +x install.sh && ./install.sh
```
# COCO Format
## Dataset Directory Structure
../sample_dataset (root_dir)
|
|------kangaroo (coco_dir)
| |
| |---ImagesTrain (set_dir)
| |----|
| |-------------------img1.jpg
| |-------------------img2.jpg
| |-------------------.........(and so on)
|
|
| |---ImagesVal (set_dir)
| |----|
| |-------------------img1.jpg
| |-------------------img2.jpg
| |-------------------.........(and so on)
|
|
| |---annotations
| |----|
| |--------------------instances_ImagesTrain.json (instances_<set_dir>.json)
| |--------------------instances_ImagesVal.json (instances_<set_dir>.json)
| |--------------------classes.txt
- instances_Train.json -> In proper COCO format
- classes.txt -> A list of classes in alphabetical order
## Get more details on how to convert your dataset to coco instance segmentation format
- https://patrickwasp.com/create-your-own-coco-style-dataset/
- https://towardsdatascience.com/master-the-coco-dataset-for-semantic-image-segmentation-part-1-of-2-732712631047
- https://github.com/adions025/XMLtoJson_Mask_RCNN
- https://www.dlology.com/blog/how-to-create-custom-coco-data-set-for-instance-segmentation/
# For this example we use elephants subset from COCO API datasets
## Direct download the subset from here
```
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1F-JzAv35lZPTv6UmBGlvY82FKZ7QskHL' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1F-JzAv35lZPTv6UmBGlvY82FKZ7QskHL" -O dataset_elephant_coco.zip && rm -rf /tmp/cookies.txt
! unzip -qq dataset_elephant_coco.zip
```
## This is how subset was created
```
! wget http://images.cocodataset.org/zips/train2014.zip
! wget http://images.cocodataset.org/zips/val2014.zip
! wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
! unzip -qq train2014.zip
! unzip -qq val2014.zip
! unzip -qq annotations_trainval2014.zip
! mkdir elephant
! mkdir elephant/train
! mkdir elephant/train/images
! mkdir elephant/val
! mkdir elephant/val/images
import json
import os
from tqdm import tqdm
with open('annotations/instances_train2014.json') as f:
data = json.load(f)
elephant_data = {};
elephant_data["info"] = data["info"]
elephant_data["licenses"] = data["licenses"]
elephant_data["categories"] = [{'supercategory': 'animal', 'id': 1, 'name': 'elephant'}]
elephant_data["images"] = [];
elephant_data["annotations"] = [];
image_ids = [];
from tqdm import tqdm
for i in tqdm(range(len(data["annotations"]))):
if(data["annotations"][i]["category_id"] == 22):
data["annotations"][i]["category_id"] = 1;
elephant_data["annotations"].append(data["annotations"][i])
image_ids.append(data["annotations"][i]["image_id"])
from tqdm import tqdm
for i in tqdm(range(len(data["images"]))):
if(data["images"][i]["id"] in image_ids):
elephant_data["images"].append(data["images"][i])
os.system("cp train2014/" + data["images"][i]['file_name'] + " elephant/train/images/")
with open('elephant/train/instances_train.json', 'w') as json_file:
json.dump(elephant_data, json_file)
import json
import os
from tqdm import tqdm
with open('annotations/instances_val2014.json') as f:
data = json.load(f)
elephant_data = {};
elephant_data["info"] = data["info"]
elephant_data["licenses"] = data["licenses"]
elephant_data["categories"] = [{'supercategory': 'animal', 'id': 1, 'name': 'elephant'}]
elephant_data["images"] = [];
elephant_data["annotations"] = [];
image_ids = [];
from tqdm import tqdm
for i in tqdm(range(len(data["annotations"]))):
if(data["annotations"][i]["category_id"] == 22):
data["annotations"][i]["category_id"] = 1;
elephant_data["annotations"].append(data["annotations"][i])
image_ids.append(data["annotations"][i]["image_id"])
from tqdm import tqdm
for i in tqdm(range(len(data["images"]))):
if(data["images"][i]["id"] in image_ids):
elephant_data["images"].append(data["images"][i])
os.system("cp val2014/" + data["images"][i]['file_name'] + " elephant/val/images/")
with open('elephant/val/instances_val.json', 'w') as json_file:
json.dump(elephant_data, json_file)
```
# Training
```
import os
import sys
sys.path.append("Monk_Object_Detection/20_solo/lib/")
from train_engine import Detector
gtf = Detector();
img_folder = "elephant/train/images"
anno_file = "elephant/train/instances_train.json"
class_file = "elephant/classes.txt"
gtf.Train_Dataset(img_folder, anno_file, class_file)
gtf.Dataset_Params(batch_size=2, num_workers=2)
gtf.List_Models();
gtf.Model_Params(model_name="solo_resnet50", gpu_devices=[0])
gtf.Hyper_Params(lr=0.001, momentum=0.9, weight_decay=0.0001);
gtf.Training_Params(num_epochs=10, save_interval=2);
gtf.Train();
```
# Inference
```
import os
import sys
sys.path.append("Monk_Object_Detection/20_solo/lib")
from infer_engine import Infer
gtf = Infer();
config_file = 'work_dirs/solo_resnet50/config_updated.py'
checkpoint_file = 'work_dirs/solo_resnet50//latest.pth'
gtf.Model_Params(config_file, checkpoint_file, use_gpu=True)
result = gtf.Predict(img_path="elephant/val/images/COCO_val2014_000000458311.jpg",
out_img_path="result.jpg",
thresh=0.3)
from IPython.display import Image
Image(filename='result.jpg')
result = gtf.Predict(img_path="elephant/val/images/COCO_val2014_000000200752.jpg",
out_img_path="result.jpg",
thresh=0.3)
from IPython.display import Image
Image(filename='result.jpg')
result = gtf.Predict(img_path="elephant/val/images/COCO_val2014_000000101959.jpg",
out_img_path="result.jpg",
thresh=0.3)
from IPython.display import Image
Image(filename='result.jpg')
```
|
github_jupyter
|
! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
! cd Monk_Object_Detection/20_solo/installation && chmod +x install.sh && ./install.sh
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1F-JzAv35lZPTv6UmBGlvY82FKZ7QskHL' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1F-JzAv35lZPTv6UmBGlvY82FKZ7QskHL" -O dataset_elephant_coco.zip && rm -rf /tmp/cookies.txt
! unzip -qq dataset_elephant_coco.zip
! wget http://images.cocodataset.org/zips/train2014.zip
! wget http://images.cocodataset.org/zips/val2014.zip
! wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
! unzip -qq train2014.zip
! unzip -qq val2014.zip
! unzip -qq annotations_trainval2014.zip
! mkdir elephant
! mkdir elephant/train
! mkdir elephant/train/images
! mkdir elephant/val
! mkdir elephant/val/images
import json
import os
from tqdm import tqdm
with open('annotations/instances_train2014.json') as f:
data = json.load(f)
elephant_data = {};
elephant_data["info"] = data["info"]
elephant_data["licenses"] = data["licenses"]
elephant_data["categories"] = [{'supercategory': 'animal', 'id': 1, 'name': 'elephant'}]
elephant_data["images"] = [];
elephant_data["annotations"] = [];
image_ids = [];
from tqdm import tqdm
for i in tqdm(range(len(data["annotations"]))):
if(data["annotations"][i]["category_id"] == 22):
data["annotations"][i]["category_id"] = 1;
elephant_data["annotations"].append(data["annotations"][i])
image_ids.append(data["annotations"][i]["image_id"])
from tqdm import tqdm
for i in tqdm(range(len(data["images"]))):
if(data["images"][i]["id"] in image_ids):
elephant_data["images"].append(data["images"][i])
os.system("cp train2014/" + data["images"][i]['file_name'] + " elephant/train/images/")
with open('elephant/train/instances_train.json', 'w') as json_file:
json.dump(elephant_data, json_file)
import json
import os
from tqdm import tqdm
with open('annotations/instances_val2014.json') as f:
data = json.load(f)
elephant_data = {};
elephant_data["info"] = data["info"]
elephant_data["licenses"] = data["licenses"]
elephant_data["categories"] = [{'supercategory': 'animal', 'id': 1, 'name': 'elephant'}]
elephant_data["images"] = [];
elephant_data["annotations"] = [];
image_ids = [];
from tqdm import tqdm
for i in tqdm(range(len(data["annotations"]))):
if(data["annotations"][i]["category_id"] == 22):
data["annotations"][i]["category_id"] = 1;
elephant_data["annotations"].append(data["annotations"][i])
image_ids.append(data["annotations"][i]["image_id"])
from tqdm import tqdm
for i in tqdm(range(len(data["images"]))):
if(data["images"][i]["id"] in image_ids):
elephant_data["images"].append(data["images"][i])
os.system("cp val2014/" + data["images"][i]['file_name'] + " elephant/val/images/")
with open('elephant/val/instances_val.json', 'w') as json_file:
json.dump(elephant_data, json_file)
import os
import sys
sys.path.append("Monk_Object_Detection/20_solo/lib/")
from train_engine import Detector
gtf = Detector();
img_folder = "elephant/train/images"
anno_file = "elephant/train/instances_train.json"
class_file = "elephant/classes.txt"
gtf.Train_Dataset(img_folder, anno_file, class_file)
gtf.Dataset_Params(batch_size=2, num_workers=2)
gtf.List_Models();
gtf.Model_Params(model_name="solo_resnet50", gpu_devices=[0])
gtf.Hyper_Params(lr=0.001, momentum=0.9, weight_decay=0.0001);
gtf.Training_Params(num_epochs=10, save_interval=2);
gtf.Train();
import os
import sys
sys.path.append("Monk_Object_Detection/20_solo/lib")
from infer_engine import Infer
gtf = Infer();
config_file = 'work_dirs/solo_resnet50/config_updated.py'
checkpoint_file = 'work_dirs/solo_resnet50//latest.pth'
gtf.Model_Params(config_file, checkpoint_file, use_gpu=True)
result = gtf.Predict(img_path="elephant/val/images/COCO_val2014_000000458311.jpg",
out_img_path="result.jpg",
thresh=0.3)
from IPython.display import Image
Image(filename='result.jpg')
result = gtf.Predict(img_path="elephant/val/images/COCO_val2014_000000200752.jpg",
out_img_path="result.jpg",
thresh=0.3)
from IPython.display import Image
Image(filename='result.jpg')
result = gtf.Predict(img_path="elephant/val/images/COCO_val2014_000000101959.jpg",
out_img_path="result.jpg",
thresh=0.3)
from IPython.display import Image
Image(filename='result.jpg')
| 0.195594 | 0.85373 |
# ImageNet classification
This notebook shows an example of ImageNet classification.
The network used for inference is a variant of DoReFaNet, whose topology is illustrated in the following picture.
The pink layers are executed in the Programmable Logic at reduced precision (1 bit for weights, 2 bits for activations) while the other layers are executed in Python.
This notebook shows how to classify labeled images (extracted from the dataset), while [dorefanet-classification](./dorefanet-classification.ipynb) runs the classification on images chosen by the user.

```
import os, pickle, random
from datetime import datetime
from matplotlib import pyplot as plt
from PIL import Image
%matplotlib inline
import numpy as np
import cv2
import qnn
from qnn import Dorefanet
from qnn import utils
```
## 1. Instantiate a Classifier
Creating a classifier will automatically download the bitstream onto the device, allocate memory buffers and load the network hyperparameters and weights.
The neural network to be implemented is specified in a json file (*dorefanet-layers.json* in this example)
The weights for the non-offloaded layers are also loaded in a numpy dictionary to be used for execution in python.
```
classifier = Dorefanet()
classifier.init_accelerator()
net = classifier.load_network(json_layer="/opt/python3.6/lib/python3.6/site-packages/qnn/params/dorefanet-layers.json")
conv0_weights = np.load('/opt/python3.6/lib/python3.6/site-packages/qnn/params/dorefanet-conv0.npy', encoding="latin1").item()
fc_weights = np.load('/opt/python3.6/lib/python3.6/site-packages/qnn/params/dorefanet-fc-normalized.npy', encoding='latin1').item()
```
## 2. Get ImageNet Classes information
Load the mapping from class indexes to human-readable labels (and WordNet synsets), used to interpret the network output. Thanks to the naming format adopted for the sample images (extracted from the validation set), the correct class can also be displayed.
```
with open("imagenet-classes.pkl", 'rb') as f:
classes = pickle.load(f)
names = dict((k, classes[k][1].split(',')[0]) for k in classes.keys())
synsets = dict((classes[k][0], classes[k][1].split(',')[0]) for k in classes.keys())
```
## 3. Open image to be classified
Pick a random image from the imagenet-samples folder (image + correct class) and apply the preprocessing transformations before inference.
```
img_folder = './imagenet-samples/'
img_file = os.path.join(img_folder, random.choice(os.listdir(img_folder)))
img, img_class = classifier.load_image(img_file)
im = Image.open(img_file)
if img_class in synsets.keys():
print(synsets[img_class])
im
```
## 4. Execute the first convolutional layer in Python
The first layer of this neural network has not been quantized, thus will not be executed in the HW accelerator (which supports only quantized arithmetic).
Python provides, with numpy, a backend to execute convolution and other matrix operations. For user convenience the most used operations (convolutional layer, thresholding and fully connected layer) are provided in the class.
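As an illustration of what a convolution layer computes, here is a naive single-channel sketch; it is not the optimized routine behind `utils.conv_layer`, which also handles multiple input and output channels.
```
import numpy as np

def naive_conv2d(img, kernel, stride=1):
    # img: (H, W), kernel: (kH, kW) -- single channel, no padding
    kH, kW = kernel.shape
    out_h = (img.shape[0] - kH) // stride + 1
    out_w = (img.shape[1] - kW) // stride + 1
    out = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            patch = img[i * stride:i * stride + kH, j * stride:j * stride + kW]
            out[i, j] = np.sum(patch * kernel)   # dot product of patch and kernel
    return out

print(naive_conv2d(np.arange(16).reshape(4, 4).astype(float), np.ones((2, 2)), stride=2))
```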
```
conv0_W = conv0_weights['conv0/W']
conv0_T = conv0_weights['conv0/T']
start = datetime.now()
# 1st convolutional layer execution, having as input the image and the trained parameters (weights)
conv0 = utils.conv_layer(img, conv0_W, stride=4)
# The result in then quantized to 2 bits representation for the subsequent HW offload
conv0 = utils.threshold(conv0, conv0_T)
end = datetime.now()
micros = int((end - start).total_seconds() * 1000000)
print("First layer SW implementation took {} microseconds".format(micros))
print(micros, file=open('timestamp.txt', 'w'))
```
### 5. HW Offload of the quantized layers
The core layers, whose values have been quantized during training, are executed in the Programmable Logic. The hardware accelerator consists of a dataflow implementation of multiple layers (in this case, convolution + maxpool).
The host code parses the network topology (specified in the json file) and manages the sequence of execution on the accelerator.
```
# Compute offloaded convolutional layers
in_dim = net['conv0']['output'][1]
in_ch = net['conv0']['output'][0]
out_dim = net['merge4']['output_dim']
out_ch = net['merge4']['output_channels']
conv_output = classifier.get_accel_buffer(out_ch, out_dim);
conv_input = classifier.prepare_buffer(conv0)
start = datetime.now()
classifier.inference(conv_input, conv_output)
end = datetime.now()
micros = int((end - start).total_seconds() * 1000000)
print("HW implementation took {} microseconds".format(micros))
print(micros, file=open('timestamp.txt', 'a'))
conv_output = classifier.postprocess_buffer(conv_output)
```
## 6. Fully connected layers in python
The fully connected layers, are executed in the python backend and the classification finalized
```
# Normalize results
fc_input = conv_output / np.max(conv_output)
start = datetime.now()
# FC Layer 0
fc0_W = fc_weights['fc0/Wn']
fc0_b = fc_weights['fc0/bn']
fc0_out = utils.fully_connected(fc_input, fc0_W, fc0_b)
fc0_out = utils.qrelu(fc0_out)
fc0_out = utils.quantize(fc0_out, 2)
# FC Layer 1
fc1_W = fc_weights['fc1/Wn']
fc1_b = fc_weights['fc1/bn']
fc1_out = utils.fully_connected(fc0_out, fc1_W, fc1_b)
fc1_out = utils.qrelu(fc1_out)
# FC Layer 2
fct_W = fc_weights['fct/W']
fct_b = np.zeros((fct_W.shape[1], ))
fct_out = utils.fully_connected(fc1_out, fct_W, fct_b)
end = datetime.now()
micros = int((end - start).total_seconds() * 1000000)
print("Fully-connected layers took {} microseconds".format(micros))
print(micros, file=open('timestamp.txt', 'a'))
```
## 7. Classification Results
The top-5 results of the inference are provided with the corresponding human-readable labels.
The final classification scores are computed by a SoftMax operator, which gives the normalized probabilities for all the classes.
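For reference, a numerically stable softmax over a vector of scores can be written in a few lines of numpy; this is a sketch of what `utils.softmax` is assumed to compute, not its actual implementation:
```python
# Illustrative softmax: exponentiate shifted scores and normalize to sum to 1.
import numpy as np

def softmax_demo(z):
    e = np.exp(z - np.max(z))   # subtract the max for numerical stability
    return e / e.sum()

print(softmax_demo(np.array([2.0, 1.0, 0.1])))  # probabilities summing to 1
```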
```
# Softmax
out = utils.softmax(fct_out)
# Top-5 results
topn = utils.get_topn_indexes(out, 5)
x_pos = np.arange(len(topn))
plt.barh(x_pos, out[topn], height=0.4, color='g', zorder=3)
plt.yticks(x_pos, [names[k] for k in topn])
plt.gca().invert_yaxis()
plt.xlim([0,1])
plt.grid(zorder=0)
plt.show()
if img_class in synsets.keys():
print("Image class: {:>5}\nPredictions:".format(synsets[img_class]))
for k in topn: print("class:{0:>15}\tprobability:{1:>8.2%}".format(names[k].lower(), out[k]))
if synsets[img_class] in (names[k] for k in topn):
print("\nMatch!")
else:
for k in topn: print("class:{0:>20}\tprobability:{1:>8.2%}".format(names[k].lower(), out[k]))
```
## 8. Performance evaluation
Show the performance of both the software and the hardware execution in terms of execution time, number of operations, and number of operations per second.
The software execution includes the first convolutional layer and the fully connected layers, while the hardware execution includes all the offloaded convolutional layers.
```
array = np.loadtxt('timestamp.txt')
array = list(map(lambda x: x/1000000, array))
MOPS = [238.176256, 1073.856969]
TIME = [array[0] + array[2], array[1]]
MOPSS = [m / t for (m, t) in zip(MOPS ,TIME)]
LABELS = ['SW', 'HW']
f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, sharex='col', sharey='row', figsize=(15,2))
x_pos = np.arange(len(LABELS))
plt.yticks(x_pos, LABELS)
ax1.barh(x_pos, TIME, height=0.6, color='r', zorder=3)
ax1.invert_yaxis()
ax1.set_xlabel("Execution Time [s]")
ax1.set_ylabel("Platform")
ax1.grid(zorder=0)
ax2.barh(x_pos, MOPS, height=0.6, color='g', zorder=3)
ax2.invert_yaxis()
ax2.set_xlabel("# of Operations [MOPS]")
ax2.grid(zorder=0)
ax3.barh(x_pos, MOPSS, height=0.6, color='b', zorder=3)
ax3.invert_yaxis()
ax3.set_xlabel("Performances [MOPS/s]")
ax3.grid(zorder=0)
plt.show()
```
## Reset the device
```
classifier.deinit_accelerator()
from pynq import Xlnk
xlnk = Xlnk()
xlnk.xlnk_reset()
```
## Our Mission ##
You recently used Naive Bayes to classify spam in this [dataset](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection). In this notebook, we will expand on the previous analysis by using a few of the new techniques you saw throughout this lesson.
> In order to get caught back up to speed with what was done in the previous notebook, run the cell below
```
# Import our libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Read in our dataset
df = pd.read_table('smsspamcollection/SMSSpamCollection',
sep='\t',
header=None,
names=['label', 'sms_message'])
# Fix our response value
df['label'] = df.label.map({'ham':0, 'spam':1})
# Split our dataset into training and testing data
X_train, X_test, y_train, y_test = train_test_split(df['sms_message'],
df['label'],
random_state=1)
# Instantiate the CountVectorizer method
count_vector = CountVectorizer()
# Fit the training data and then return the matrix
training_data = count_vector.fit_transform(X_train)
# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
testing_data = count_vector.transform(X_test)
# Instantiate our model
naive_bayes = MultinomialNB()
# Fit our model to the training data
naive_bayes.fit(training_data, y_train)
# Predict on the test data
predictions = naive_bayes.predict(testing_data)
# Score our model
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
```
### Turns Out...
It turns out that our Naive Bayes model actually does a pretty good job. However, let's take a look at a few additional models to see if we can improve on it.
Specifically in this notebook, we will take a look at the following techniques:
* [BaggingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html#sklearn.ensemble.BaggingClassifier)
* [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier)
* [AdaBoostClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html#sklearn.ensemble.AdaBoostClassifier)
Another really useful guide for ensemble methods can be found [in the documentation here](http://scikit-learn.org/stable/modules/ensemble.html).
These ensemble methods use a combination of techniques you have seen throughout this lesson:
* **Bootstrap the data** passed through a learner (bagging); a short from-scratch sketch of this idea follows the list.
* **Subset the features** used for a learner (combined with bagging, these are the two random components of random forests).
* **Ensemble learners** together in a way that allows those that perform best in certain areas to create the largest impact (boosting).
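To make the bagging idea concrete, here is a minimal from-scratch sketch: train several trees on bootstrap resamples and combine them by majority vote. This is for intuition only and is not how `BaggingClassifier` is implemented internally; the toy dataset from `make_classification` is used just so the snippet runs on its own.
```python
# Bagging by hand: bootstrap resamples + majority vote (illustration only).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=500, random_state=0)
rng = np.random.RandomState(0)

trees = []
for _ in range(25):
    idx = rng.choice(len(X), size=len(X), replace=True)   # sample rows with replacement
    trees.append(DecisionTreeClassifier(random_state=0).fit(X[idx], y[idx]))

votes = np.stack([t.predict(X) for t in trees])            # shape (25, n_samples), labels 0/1
majority = (votes.mean(axis=0) > 0.5).astype(int)          # majority vote per sample
print("Training accuracy of the ensemble vote:", (majority == y).mean())
```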
In this notebook, let's get some practice with these methods, which will also help you get comfortable with the process used for performing supervised machine learning in python in general.
Since you cleaned and vectorized the text in the previous notebook, this notebook can be focused on the fun part - the machine learning part.
### This Process Looks Familiar...
In general, there is a five-step process that can be used each time you want to use a supervised learning method (which you actually used above):
1. **Import** the model.
2. **Instantiate** the model with the hyperparameters of interest.
3. **Fit** the model to the training data.
4. **Predict** on the test data.
5. **Score** the model by comparing the predictions to the actual values.
Follow the steps through this notebook to perform these steps using each of the ensemble methods: **BaggingClassifier**, **RandomForestClassifier**, and **AdaBoostClassifier**.
> **Step 1**: First use the documentation to `import` all three of the models.
```
# Import the Bagging, RandomForest, and AdaBoost Classifier
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier, AdaBoostClassifier
```
> **Step 2:** Now that you have imported each of the classifiers, `instantiate` each with the hyperparameters specified in each comment. In the upcoming lessons, you will see how we can automate the process of finding the best hyperparameters. For now, let's get comfortable with the process and our new algorithms.
```
# Instantiate a BaggingClassifier with:
# 200 weak learners (n_estimators) and everything else as default values
bag_mod = BaggingClassifier(n_estimators=200)
# Instantiate a RandomForestClassifier with:
# 200 weak learners (n_estimators) and everything else as default values
rf_mod = RandomForestClassifier(n_estimators=200)
# Instantiate an AdaBoostClassifier with:
# 300 weak learners (n_estimators) and a learning_rate of 0.2
ada_mod = AdaBoostClassifier(n_estimators=300, learning_rate=0.2)
```
> **Step 3:** Now that you have instantiated each of your models, `fit` them using the **training_data** and **y_train**. This may take a bit of time; you are fitting 700 weak learners after all!
```
# Fit your BaggingClassifier to the training data
bag_mod.fit(training_data, y_train)
# Fit your RandomForestClassifier to the training data
rf_mod.fit(training_data, y_train)
# Fit your AdaBoostClassifier to the training data
ada_mod.fit(training_data, y_train)
```
> **Step 4:** Now that you have fit each of your models, you will use each to `predict` on the **testing_data**.
```
# Predict using BaggingClassifier on the test data
bag_preds = bag_mod.predict(testing_data)
# Predict using RandomForestClassifier on the test data
rf_preds = rf_mod.predict(testing_data)
# Predict using AdaBoostClassifier on the test data
ada_preds = ada_mod.predict(testing_data)
```
> **Step 5:** Now that you have made your predictions, compare your predictions to the actual values using the function below for each of your models - this will give you the `score` for how well each of your models is performing. It might also be useful to show the naive bayes model again here.
```
def print_metrics(y_true, preds, model_name=None):
'''
INPUT:
y_true - the y values that are actually true in the dataset (numpy array or pandas series)
preds - the predictions for those values from some model (numpy array or pandas series)
model_name - (str - optional) a name associated with the model if you would like to add it to the print statements
OUTPUT:
None - prints the accuracy, precision, recall, and F1 score
'''
    if model_name is None:
print('Accuracy score: ', format(accuracy_score(y_true, preds)))
print('Precision score: ', format(precision_score(y_true, preds)))
print('Recall score: ', format(recall_score(y_true, preds)))
print('F1 score: ', format(f1_score(y_true, preds)))
print('\n\n')
else:
print('Accuracy score for ' + model_name + ' :' , format(accuracy_score(y_true, preds)))
print('Precision score ' + model_name + ' :', format(precision_score(y_true, preds)))
print('Recall score ' + model_name + ' :', format(recall_score(y_true, preds)))
print('F1 score ' + model_name + ' :', format(f1_score(y_true, preds)))
print('\n\n')
# Print Bagging scores
print_metrics(y_test, bag_preds, 'bagging')
# Print Random Forest scores
print_metrics(y_test, rf_preds, 'random forest')
# Print AdaBoost scores
print_metrics(y_test, ada_preds, 'adaboost')
# Naive Bayes Classifier scores
print_metrics(y_test, predictions, 'naive bayes')
```
### Recap
Now you have seen the whole process for a few ensemble models!
1. **Import** the model.
2. **Instantiate** the model with the hyperparameters of interest.
3. **Fit** the model to the training data.
4. **Predict** on the test data.
5. **Score** the model by comparing the predictions to the actual values.
This is a very common process for performing machine learning.
### But, Wait...
You might be asking -
* What do these metrics mean?
* How do I optimize to get the best model?
* There are so many hyperparameters to each of these models, how do I figure out what the best values are for each?
**This is exactly what the last two lessons of this course on supervised learning are all about.**
```
%matplotlib inline
```
# Opt 1 parameter
```
def run(Plot, Save):
import numpy as np
from PyMieSim import Material
from PyMieSim.Scatterer import Sphere
from PyMieSim.Detector import Photodiode, LPmode
from PyMieSim.Source import PlaneWave
from PyMieSim.Experiment import ScatSet, SourceSet, Setup, DetectorSet
DiameterList = np.linspace(100e-9, 1000e-9, 200)
Detector0 = Photodiode(NA = 0.1,
Sampling = 300,
GammaOffset = 20,
PhiOffset = 0,
CouplingMode = 'Centered')
scatKwargs = { 'Diameter' : np.linspace(400e-9, 2000e-9, 200),
'Material' : Material('BK7'),
'nMedium' : [1] }
sourceKwargs = { 'Wavelength' : 1e-6,
'Polarization' : [0]}
Detector0 = Photodiode(NA = 2.0,
Sampling = 300,
GammaOffset = 0,
PhiOffset = 0,
CouplingMode = 'Centered')
detecSet = DetectorSet([Detector0])
scatSet = ScatSet(Scatterer = Sphere, kwargs = scatKwargs )
sourceSet = SourceSet(Source = PlaneWave, kwargs = sourceKwargs )
Experiment = Setup(ScattererSet = scatSet,
SourceSet = sourceSet,
DetectorSet = detecSet)
# Metric can be "max"
# "min"
# "mean"
# "std+RI"
# "std+Diameter"
# "std+Polarization"
# "std+Wavelength"
# "std+Detector"
# "monotonic+RI"
# "monotonic+Diameter"
# "monotonic+Polarization"
# "monotonic+Wavelength"
# "monotonic+Detector"
Opt = Experiment.Optimize(Setup = Experiment,
Metric = 'mean',
Parameter = ['PhiOffset'],
Optimum = 'Maximum',
MinVal = [1e-5],
MaxVal = [180],
WhichDetector = 0,
X0 = [0.6],
MaxIter = 350,
Tol = 1e-4,
FirstStride = 30)
print(Opt.Result)
df = Experiment.Coupling(AsType='dataframe')
if Plot:
        df.Plot(y='Coupling', x='Diameter') # y can be "Coupling" or "STD"
if __name__ == '__main__':
run(Plot=True, Save=False)
```
## What is the kNN algorithm?
The k-nearest-neighbours (kNN) algorithm is one of the simplest classification methods in data mining. "K nearest neighbours" means that each sample can be represented by its k closest neighbours.
The core idea of kNN is that if the majority of the k most similar samples (i.e. the nearest neighbours in feature space) belong to a certain class, then the sample belongs to that class as well and shares the characteristics of that class. When making the classification decision, the method relies only on the class of the one or few nearest samples. Because kNN depends on a limited number of nearby samples rather than on discriminating class regions, it is particularly well suited to sample sets whose class regions overlap or intersect heavily.
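To make the idea concrete before moving to OpenCV, here is a minimal kNN classifier written directly in numpy (an illustration only; the tutorial below uses `cv2.ml.KNearest` instead): compute the distance from the query to every training sample, take the k nearest, and vote.
```python
# Minimal kNN for illustration; the tutorial below uses cv2.ml.KNearest instead.
import numpy as np

def knn_predict(train_x, train_y, query, k=3):
    dists = np.linalg.norm(train_x - query, axis=1)            # Euclidean distance to every sample
    nearest = np.argsort(dists)[:k]                            # indices of the k closest samples
    labels, counts = np.unique(train_y[nearest], return_counts=True)
    return labels[np.argmax(counts)]                           # majority vote

train_x = np.array([[1.0, 1.0], [1.2, 0.8], [8.0, 9.0], [9.0, 8.5]])
train_y = np.array([0, 0, 1, 1])
print(knn_predict(train_x, train_y, np.array([1.1, 1.0])))     # expected: 0
```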
## Related math
* Euclidean distance between two points, d = sqrt((x1 - x2)^2 + (y1 - y2)^2):
```
import math
a = (1, 2)
b = (3, 4)
dist = math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
print(dist)
```
## Environment setup
* Mac OS 10.13
* Python 3.6.5
* jupyter notebook
* Install numpy
* Install scipy
* Install matplotlib
* Install opencv-python
* Install ipython
* Install jupyter
```
pip install numpy
pip install scipy
pip install matplotlib
pip install opencv-python
pip install ipython
pip install jupyter
```
## Source Code
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
train_data = np.random.randint(0, 100, (25, 2)).astype(np.float32)
responses = np.random.randint(0, 2, (25, 1)).astype(np.float32)
red = train_data[responses.ravel() == 0]
plt.scatter(red[:,0], red[:,1], 80, 'r', '^')
blue = train_data[responses.ravel() == 1]
plt.scatter(blue[:,0], blue[:,1], 80, 'b', 's')
# plt.show()
newcomer = np.random.randint(0, 100, (1, 2)).astype(np.float32)
plt.scatter(newcomer[:,0], newcomer[:,1], 80, 'g', 'o')
knn = cv2.ml.KNearest_create()
knn.train(train_data, cv2.ml.ROW_SAMPLE, responses)
ret, results, neighbours, dist = knn.findNearest(newcomer, 3)
print("result: ", results)
print("neighbours: ", neighbours)
print("distance: ", dist)
```
## Code explanation
* Generate the training data and labels
```python
train_data = np.random.randint(0, 100, (25, 2)).astype(np.float32)
responses = np.random.randint(0, 2, (25, 1)).astype(np.float32)
```
* Plot the red samples in the figure
```python
red = train_data[responses.ravel() == 0]
plt.scatter(red[:,0], red[:,1], 80, 'r', '^')
```
* Plot the blue samples in the figure
```python
blue = train_data[responses.ravel() == 1]
plt.scatter(blue[:,0], blue[:,1], 80, 'b', 's')
```
* Generate the new data point to be classified
```python
newcomer = np.random.randint(0, 100, (1, 2)).astype(np.float32)
plt.scatter(newcomer[:,0], newcomer[:,1], 80, 'g', 'o')
```
* Create the kNN model and train it on the samples
```python
knn = cv2.ml.KNearest_create()
knn.train(train_data, cv2.ml.ROW_SAMPLE, responses)
```
* Classify the new data point, with K set to 5
```python
ret, results, neighbours, dist = knn.findNearest(newcomer, 5)
```
## References
1. [Understanding k-Nearest Neighbour][4]
2. [wikipedia: k-nearest neighbors algorithm][5]
3. [kNN (K-Nearest Neighbor) nearest-neighbour classification][6]
[1]: https://segmentfault.com/a/1190000007715243
[2]: http://www.numpy.org/
[3]: http://matplotlib.org/
[4]: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.html
[5]: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
[6]: http://blog.csdn.net/xlm289348/article/details/8876353
- Print the n words entered by the user in reverse order.
```
words=[]
i=0
n=int(input('please enter an integer as the number of the word you want to enter. '))
while i < n :
words.append(input('please enter a word. '))
i+=1
for i in range(n-1, -1, -1):
print(words[i])
```
- Estimate the value of π using the Monte Carlo method (the note below explains why the estimator works)
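Why this works: a point drawn uniformly from the unit square lands inside the quarter circle of radius 1 with probability equal to the ratio of the two areas, so

$$P(x^2 + y^2 \le 1) = \frac{\pi/4}{1} = \frac{\pi}{4}, \qquad \pi \approx 4 \cdot \frac{\text{points inside}}{n}.$$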
```
import random
def mentekaluo_pi (n):
inside_number=0
for i in range (n):
x,y=random.random(),random.random()
if x*x+y*y<=1 :
inside_number+=1
return 4*inside_number/n
n=int(input('please enter times. '))
pi=mentekaluo_pi(n)
print(pi)
# 9x9 multiplication table
for i in range(1, 10):
j=1
while j <= i :
print('|{}x{}={:2}| '.format(i, j, i*j), end = '')
j+=1
print()
print('---------------------------------------------------------------------------------------------------------------------------')
```
- Redo the while-loop exercises from the previous chapters using for loops, writing them as functions where possible.
 - 1. Compute m! + n! + k!
 - 2. Write a function that returns the sum of the first n terms of 1 - 1/3 + 1/5 - 1/7 ...; in the main program, print 4 times that sum for n = 1000 and n = 100000.
```
#1
def factorial (m):
result=1
for i in range(1,m+1,1):
result*=i
return result
m=int(input('please enter an integer. '))
n=int(input('please enter an integer. '))
k=int(input('please enter an integer. '))
total=factorial(m)+factorial(n)+factorial(k)
print(total)
# 2
def calculate (n):
sum=0
for i in range (1,n+1,1) :
if i%2==0 :
sum-=1/(i*2-1)
else :
sum+=1/(i*2-1)
return sum
print(4*calculate(1000))
print(4*calculate(100000))
```
- Write a function that returns the sum of all the numbers in a list
```
def sum_list (mylist):
mysum=sum(mylist)
return mysum
number=[1,2,3,6,5,9]
print('The sum of the number in the list is :',sum_list(number))
```
- Write a function that returns the smallest number in a list
```
def find_min (mylist):
mymin=min(mylist)
return mymin
number=[1,2,3,6,5,9]
print('The smallest number in the list is :',find_min(number))
```
- Write a function that returns the position of an element/object in a list, or -1 if it is not present.
```
def find_pos (mylist,m):
pos=-1
for i in range(len(mylist)) :
if m==mylist[i] :
pos=i
return pos
number=[1,2,3,4,5,6,7,8,9]
i=int(input('please enter an integer. '))
pos=find_pos(number,i)
if pos==-1:
    print(i, 'is not in the list.')
else :
    print(i, 'is in the list at position', pos+1)
```
- Exercise 2: write a function that computes the cosine of the angle between two vectors (stored in lists), and call it from the main program (the formula is given below).
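For reference, the quantity computed by the function below is

$$\cos\theta = \frac{\vec a \cdot \vec b}{\lVert \vec a \rVert \, \lVert \vec b \rVert} = \frac{\sum_i a_i b_i}{\sqrt{\sum_i a_i^2}\,\sqrt{\sum_i b_i^2}}.$$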
```
import math
def calculate_cos(a,b):
moa=0
mob=0
s=0
for x in a :
moa+=x*x
for y in b :
mob+=y*y
for i in range(len(a)) :
s+=a[i]*b[i]
cos=s/(math.sqrt(moa*mob))
return cos
a=[]
b=[]
while True :
    x=input('Enter a component of vector a (enter f to finish): ')
if x=='f' :
break
a.append(int(x))
while True :
    y=input('Enter a component of vector b (enter f to finish): ')
if y=='f' :
break
b.append(int(y))
print('The cosine of the angle between the two vectors is:', calculate_cos(a,b))
```
- Challenge: to motivate students to learn Python, the Python teacher bought 100 identical MacBook Pros at his own expense and distributed them among three classes, with each class receiving at least 5. Use brute force to count how many ways the laptops can be distributed.
```
method=0
for i in range(5, 91):
    for j in range(5, 91):
        for k in range(5, 91):
if i+j+k==100 :
method+=1
print(method)
```
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Auto ML 04: Remote Execution with Text Data from Azure Blob Storage
In this example we use the [Burning Man 2016 dataset](https://innovate.burningman.org/datasets-page/) to showcase how you can use AutoML to handle text data from Azure Blob Storage.
Make sure you have executed the [00.configuration](00.configuration.ipynb) before running this notebook.
In this notebook you will learn how to:
1. Create an `Experiment` in an existing `Workspace`.
2. Attach an existing DSVM to a workspace.
3. Configure AutoML using `AutoMLConfig`.
4. Train the model using the DSVM.
5. Explore the results.
6. Test the best fitted model.
In addition this notebook showcases the following features
- **Parallel** executions for iterations
- **Asynchronous** tracking of progress
- **Cancellation** of individual iterations or the entire run
- Retrieving models for any iteration or logged metric
- Specifying AutoML settings as `**kwargs`
- Handling **text** data using the `preprocess` flag
## Create an Experiment
As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
import os
import random
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
ws = Workspace.from_config()
# Choose a name for the run history container in the workspace.
experiment_name = 'automl-remote-dsvm-blobstore'
project_folder = './sample_projects/automl-remote-dsvm-blobstore'
experiment = Experiment(ws, experiment_name)
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(data=output, index=['']).T
```
## Diagnostics
Opt-in diagnostics for better experience, quality, and security of future releases.
```
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics = True)
```
## Attach a Remote Linux DSVM
To use a remote Docker compute target:
1. Create a Linux DSVM in Azure, following these [quick instructions](https://docs.microsoft.com/en-us/azure/machine-learning/desktop-workbench/how-to-create-dsvm-hdi). Make sure you use the Ubuntu flavor (not CentOS). Make sure that disk space is available under `/tmp` because AutoML creates files under `/tmp/azureml_runs`. The DSVM should have more cores than the number of parallel runs that you plan to enable. It should also have at least 4GB per core.
2. Enter the IP address, user name and password below.
**Note:** By default, SSH runs on port 22 and you don't need to change the port number below. If you've configured SSH to use a different port, change `dsvm_ssh_port` accordingly. [Read more](https://render.githubusercontent.com/documentation/sdk/ssh-issue.md) on changing SSH ports for security reasons.
```
from azureml.core.compute import RemoteCompute
import time
# Add your VM information below
# If a compute with the specified compute_name already exists, it will be used and the dsvm_ip_addr, dsvm_ssh_port,
# dsvm_username and dsvm_password will be ignored.
compute_name = 'mydsvm'
dsvm_ip_addr = '<<ip_addr>>'
dsvm_ssh_port = 22
dsvm_username = '<<username>>'
dsvm_password = '<<password>>'
if compute_name in ws.compute_targets():
print('Using existing compute.')
dsvm_compute = ws.compute_targets()[compute_name]
else:
RemoteCompute.attach(workspace=ws, name=compute_name, address=dsvm_ip_addr, username=dsvm_username, password=dsvm_password, ssh_port=dsvm_ssh_port)
while ws.compute_targets()[compute_name].provisioning_state == 'Creating':
time.sleep(1)
dsvm_compute = ws.compute_targets()[compute_name]
if dsvm_compute.provisioning_state == 'Failed':
print('Attached failed.')
print(dsvm_compute.provisioning_errors)
dsvm_compute.delete()
```
## Create Get Data File
For remote executions you should author a `get_data.py` file containing a `get_data()` function. This file should be in the root directory of the project. You can encapsulate code to read data either from a blob storage or local disk in this file.
In this example, the `get_data()` function returns a [dictionary](README.md#getdata).
```
if not os.path.exists(project_folder):
os.makedirs(project_folder)
%%writefile $project_folder/get_data.py
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
def get_data():
# Load Burning Man 2016 data.
df = pd.read_csv("https://automldemods.blob.core.windows.net/datasets/PlayaEvents2016,_1.6MB,_3.4k-rows.cleaned.2.tsv",
delimiter="\t", quotechar='"')
# Get integer labels.
le = LabelEncoder()
le.fit(df["Label"].values)
y = le.transform(df["Label"].values)
X = df.drop(["Label"], axis=1)
X_train, _, y_train, _ = train_test_split(X, y, test_size = 0.1, random_state = 42)
return { "X" : X_train, "y" : y_train }
```
### View data
You can execute the `get_data()` function locally to view the training data.
```
%run $project_folder/get_data.py
data_dict = get_data()
df = data_dict["X"]
y = data_dict["y"]
pd.set_option('display.max_colwidth', 15)
df['Label'] = pd.Series(y, index=df.index)
df.head()
```
## Configure AutoML <a class="anchor" id="Instatiate-AutoML-Remote-DSVM"></a>
You can specify `automl_settings` as `**kwargs` as well. Also note that you can use a `get_data()` function for local executions too.
**Note:** When using Remote DSVM, you can't pass Numpy arrays directly to the fit method.
|Property|Description|
|-|-|
|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>balanced_accuracy</i><br><i>average_precision_score_weighted</i><br><i>precision_score_weighted</i>|
|**max_time_sec**|Time limit in seconds for each iteration.|
|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
|**n_cross_validations**|Number of cross validation splits.|
|**concurrent_iterations**|Maximum number of iterations that would be executed in parallel. This should be less than the number of cores on the DSVM.|
|**preprocess**|Setting this to *True* enables AutoML to perform preprocessing on the input to handle *missing data*, and to perform some common *feature extraction*.|
|**max_cores_per_iteration**|Indicates how many cores on the compute target would be used to train a single pipeline.<br>Default is *1*; you can set it to *-1* to use all cores.|
```
automl_settings = {
"max_time_sec": 3600,
"iterations": 10,
"n_cross_validations": 5,
"primary_metric": 'AUC_weighted',
"preprocess": True,
"max_cores_per_iteration": 2
}
automl_config = AutoMLConfig(task = 'classification',
path = project_folder,
compute_target = dsvm_compute,
data_script = project_folder + "/get_data.py",
**automl_settings
)
```
## Train the Models <a class="anchor" id="Training-the-model-Remote-DSVM"></a>
Call the `submit` method on the experiment object and pass the run configuration. For remote runs the execution is asynchronous, so you will see the iterations get populated as they complete. You can interact with the widgets and models even when the experiment is running to retrieve the best model up to that point. Once you are satisfied with the model, you can cancel a particular iteration or the whole run.
```
remote_run = experiment.submit(automl_config)
```
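If you prefer to block the notebook until the experiment finishes rather than watching the widget below, the run object's `wait_for_completion` method can be used (assuming your installed SDK version exposes it on the returned run, as `azureml.core` run objects generally do):
```
# Block until the remote run finishes, streaming status output to the notebook.
# remote_run is the AutoMLRun returned by experiment.submit(automl_config) above.
remote_run.wait_for_completion(show_output=True)
```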
## Exploring the Results <a class="anchor" id="Exploring-the-Results-Remote-DSVM"></a>
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
You can click on a pipeline to see run properties and output logs. Logs are also available on the DSVM under `/tmp/azureml_run/{iterationid}/azureml-logs`
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
```
from azureml.train.widgets import RunDetails
RunDetails(remote_run).show()
```
#### Retrieve All Child Runs
You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
```
children = list(remote_run.get_children())
metricslist = {}
for run in children:
properties = run.get_properties()
metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
metricslist[int(properties['iteration'])] = metrics
rundata = pd.DataFrame(metricslist).sort_index(1)
rundata
```
## Cancelling Runs
You can cancel ongoing remote runs using the `cancel` and `cancel_iteration` functions.
```
# Cancel the ongoing experiment and stop scheduling new iterations.
remote_run.cancel()
# Cancel iteration 1 and move onto iteration 2.
# remote_run.cancel_iteration(1)
```
### Retrieve the Best Model
Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
```
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)
```
#### Best Model Based on Any Other Metric
Show the run and the model which has the best `accuracy` value:
```
# lookup_metric = "accuracy"
# best_run, fitted_model = remote_run.get_output(metric = lookup_metric)
```
#### Model from a Specific Iteration
```
iteration = 0
zero_run, zero_model = remote_run.get_output(iteration = iteration)
```
### Testing the Fitted Model <a class="anchor" id="Testing-the-Fitted-Model-Remote-DSVM"></a>
```
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from pandas_ml import ConfusionMatrix
df = pd.read_csv("https://automldemods.blob.core.windows.net/datasets/PlayaEvents2016,_1.6MB,_3.4k-rows.cleaned.2.tsv",
delimiter="\t", quotechar='"')
# get integer labels
le = LabelEncoder()
le.fit(df["Label"].values)
y = le.transform(df["Label"].values)
X = df.drop(["Label"], axis=1)
_, X_test, _, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
ypred = fitted_model.predict(X_test.values)
ypred_strings = le.inverse_transform(ypred)
ytest_strings = le.inverse_transform(y_test)
cm = ConfusionMatrix(ytest_strings, ypred_strings)
print(cm)
cm.plot()
```
```
import jnius_config
jnius_config.set_classpath('.', './X3DJSAIL.3.3.full.jar')
from jnius import autoclass
CommentsBlock = autoclass('org.web3d.x3d.jsail.Core.CommentsBlock')
Anchor = autoclass('org.web3d.x3d.jsail.Networking.AnchorObject')
Appearance = autoclass('org.web3d.x3d.jsail.Shape.AppearanceObject')
Arc2D = autoclass('org.web3d.x3d.jsail.Geometry2D.Arc2DObject')
ArcClose2D = autoclass('org.web3d.x3d.jsail.Geometry2D.ArcClose2DObject')
AudioClip = autoclass('org.web3d.x3d.jsail.Sound.AudioClipObject')
Background = autoclass('org.web3d.x3d.jsail.EnvironmentalEffects.BackgroundObject')
BallJoint = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.BallJointObject')
Billboard = autoclass('org.web3d.x3d.jsail.Navigation.BillboardObject')
BlendedVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.BlendedVolumeStyleObject')
BooleanFilter = autoclass('org.web3d.x3d.jsail.EventUtilities.BooleanFilterObject')
BooleanSequencer = autoclass('org.web3d.x3d.jsail.EventUtilities.BooleanSequencerObject')
BooleanToggle = autoclass('org.web3d.x3d.jsail.EventUtilities.BooleanToggleObject')
BooleanTrigger = autoclass('org.web3d.x3d.jsail.EventUtilities.BooleanTriggerObject')
BoundaryEnhancementVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.BoundaryEnhancementVolumeStyleObject')
BoundedPhysicsModel = autoclass('org.web3d.x3d.jsail.ParticleSystems.BoundedPhysicsModelObject')
Box = autoclass('org.web3d.x3d.jsail.Geometry3D.BoxObject')
CADAssembly = autoclass('org.web3d.x3d.jsail.CADGeometry.CADAssemblyObject')
CADFace = autoclass('org.web3d.x3d.jsail.CADGeometry.CADFaceObject')
CADLayer = autoclass('org.web3d.x3d.jsail.CADGeometry.CADLayerObject')
CADPart = autoclass('org.web3d.x3d.jsail.CADGeometry.CADPartObject')
CartoonVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.CartoonVolumeStyleObject')
Circle2D = autoclass('org.web3d.x3d.jsail.Geometry2D.Circle2DObject')
ClipPlane = autoclass('org.web3d.x3d.jsail.Rendering.ClipPlaneObject')
CollidableOffset = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.CollidableOffsetObject')
CollidableShape = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.CollidableShapeObject')
Collision = autoclass('org.web3d.x3d.jsail.Navigation.CollisionObject')
CollisionCollection = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.CollisionCollectionObject')
CollisionSensor = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.CollisionSensorObject')
CollisionSpace = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.CollisionSpaceObject')
Color = autoclass('org.web3d.x3d.jsail.Rendering.ColorObject')
ColorChaser = autoclass('org.web3d.x3d.jsail.Followers.ColorChaserObject')
ColorDamper = autoclass('org.web3d.x3d.jsail.Followers.ColorDamperObject')
ColorInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.ColorInterpolatorObject')
ColorRGBA = autoclass('org.web3d.x3d.jsail.Rendering.ColorRGBAObject')
ComposedCubeMapTexture = autoclass('org.web3d.x3d.jsail.CubeMapTexturing.ComposedCubeMapTextureObject')
ComposedShader = autoclass('org.web3d.x3d.jsail.Shaders.ComposedShaderObject')
ComposedTexture3D = autoclass('org.web3d.x3d.jsail.Texturing3D.ComposedTexture3DObject')
ComposedVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.ComposedVolumeStyleObject')
Cone = autoclass('org.web3d.x3d.jsail.Geometry3D.ConeObject')
ConeEmitter = autoclass('org.web3d.x3d.jsail.ParticleSystems.ConeEmitterObject')
Contact = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.ContactObject')
Contour2D = autoclass('org.web3d.x3d.jsail.NURBS.Contour2DObject')
ContourPolyline2D = autoclass('org.web3d.x3d.jsail.NURBS.ContourPolyline2DObject')
Coordinate = autoclass('org.web3d.x3d.jsail.Rendering.CoordinateObject')
CoordinateChaser = autoclass('org.web3d.x3d.jsail.Followers.CoordinateChaserObject')
CoordinateDamper = autoclass('org.web3d.x3d.jsail.Followers.CoordinateDamperObject')
CoordinateDouble = autoclass('org.web3d.x3d.jsail.NURBS.CoordinateDoubleObject')
CoordinateInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.CoordinateInterpolatorObject')
CoordinateInterpolator2D = autoclass('org.web3d.x3d.jsail.Interpolation.CoordinateInterpolator2DObject')
Cylinder = autoclass('org.web3d.x3d.jsail.Geometry3D.CylinderObject')
CylinderSensor = autoclass('org.web3d.x3d.jsail.PointingDeviceSensor.CylinderSensorObject')
DirectionalLight = autoclass('org.web3d.x3d.jsail.Lighting.DirectionalLightObject')
DISEntityManager = autoclass('org.web3d.x3d.jsail.DIS.DISEntityManagerObject')
DISEntityTypeMapping = autoclass('org.web3d.x3d.jsail.DIS.DISEntityTypeMappingObject')
Disk2D = autoclass('org.web3d.x3d.jsail.Geometry2D.Disk2DObject')
DoubleAxisHingeJoint = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.DoubleAxisHingeJointObject')
EaseInEaseOut = autoclass('org.web3d.x3d.jsail.Interpolation.EaseInEaseOutObject')
EdgeEnhancementVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.EdgeEnhancementVolumeStyleObject')
ElevationGrid = autoclass('org.web3d.x3d.jsail.Geometry3D.ElevationGridObject')
EspduTransform = autoclass('org.web3d.x3d.jsail.DIS.EspduTransformObject')
ExplosionEmitter = autoclass('org.web3d.x3d.jsail.ParticleSystems.ExplosionEmitterObject')
Extrusion = autoclass('org.web3d.x3d.jsail.Geometry3D.ExtrusionObject')
FillProperties = autoclass('org.web3d.x3d.jsail.Shape.FillPropertiesObject')
FloatVertexAttribute = autoclass('org.web3d.x3d.jsail.Shaders.FloatVertexAttributeObject')
Fog = autoclass('org.web3d.x3d.jsail.EnvironmentalEffects.FogObject')
FogCoordinate = autoclass('org.web3d.x3d.jsail.EnvironmentalEffects.FogCoordinateObject')
FontStyle = autoclass('org.web3d.x3d.jsail.Text.FontStyleObject')
ForcePhysicsModel = autoclass('org.web3d.x3d.jsail.ParticleSystems.ForcePhysicsModelObject')
GeneratedCubeMapTexture = autoclass('org.web3d.x3d.jsail.CubeMapTexturing.GeneratedCubeMapTextureObject')
GeoCoordinate = autoclass('org.web3d.x3d.jsail.Geospatial.GeoCoordinateObject')
GeoElevationGrid = autoclass('org.web3d.x3d.jsail.Geospatial.GeoElevationGridObject')
GeoLocation = autoclass('org.web3d.x3d.jsail.Geospatial.GeoLocationObject')
GeoLOD = autoclass('org.web3d.x3d.jsail.Geospatial.GeoLODObject')
GeoMetadata = autoclass('org.web3d.x3d.jsail.Geospatial.GeoMetadataObject')
GeoOrigin = autoclass('org.web3d.x3d.jsail.Geospatial.GeoOriginObject')
GeoPositionInterpolator = autoclass('org.web3d.x3d.jsail.Geospatial.GeoPositionInterpolatorObject')
GeoProximitySensor = autoclass('org.web3d.x3d.jsail.Geospatial.GeoProximitySensorObject')
GeoTouchSensor = autoclass('org.web3d.x3d.jsail.Geospatial.GeoTouchSensorObject')
GeoTransform = autoclass('org.web3d.x3d.jsail.Geospatial.GeoTransformObject')
GeoViewpoint = autoclass('org.web3d.x3d.jsail.Geospatial.GeoViewpointObject')
Group = autoclass('org.web3d.x3d.jsail.Grouping.GroupObject')
HAnimDisplacer = autoclass('org.web3d.x3d.jsail.HAnim.HAnimDisplacerObject')
HAnimHumanoid = autoclass('org.web3d.x3d.jsail.HAnim.HAnimHumanoidObject')
HAnimJoint = autoclass('org.web3d.x3d.jsail.HAnim.HAnimJointObject')
HAnimMotion = autoclass('org.web3d.x3d.jsail.HAnim.HAnimMotionObject')
HAnimSegment = autoclass('org.web3d.x3d.jsail.HAnim.HAnimSegmentObject')
HAnimSite = autoclass('org.web3d.x3d.jsail.HAnim.HAnimSiteObject')
ImageCubeMapTexture = autoclass('org.web3d.x3d.jsail.CubeMapTexturing.ImageCubeMapTextureObject')
ImageTexture = autoclass('org.web3d.x3d.jsail.Texturing.ImageTextureObject')
ImageTexture3D = autoclass('org.web3d.x3d.jsail.Texturing3D.ImageTexture3DObject')
IndexedFaceSet = autoclass('org.web3d.x3d.jsail.Geometry3D.IndexedFaceSetObject')
IndexedLineSet = autoclass('org.web3d.x3d.jsail.Rendering.IndexedLineSetObject')
IndexedQuadSet = autoclass('org.web3d.x3d.jsail.CADGeometry.IndexedQuadSetObject')
IndexedTriangleFanSet = autoclass('org.web3d.x3d.jsail.Rendering.IndexedTriangleFanSetObject')
IndexedTriangleSet = autoclass('org.web3d.x3d.jsail.Rendering.IndexedTriangleSetObject')
IndexedTriangleStripSet = autoclass('org.web3d.x3d.jsail.Rendering.IndexedTriangleStripSetObject')
Inline = autoclass('org.web3d.x3d.jsail.Networking.InlineObject')
IntegerSequencer = autoclass('org.web3d.x3d.jsail.EventUtilities.IntegerSequencerObject')
IntegerTrigger = autoclass('org.web3d.x3d.jsail.EventUtilities.IntegerTriggerObject')
IsoSurfaceVolumeData = autoclass('org.web3d.x3d.jsail.VolumeRendering.IsoSurfaceVolumeDataObject')
KeySensor = autoclass('org.web3d.x3d.jsail.KeyDeviceSensor.KeySensorObject')
Layer = autoclass('org.web3d.x3d.jsail.Layering.LayerObject')
LayerSet = autoclass('org.web3d.x3d.jsail.Layering.LayerSetObject')
Layout = autoclass('org.web3d.x3d.jsail.Layout.LayoutObject')
LayoutGroup = autoclass('org.web3d.x3d.jsail.Layout.LayoutGroupObject')
LayoutLayer = autoclass('org.web3d.x3d.jsail.Layout.LayoutLayerObject')
LinePickSensor = autoclass('org.web3d.x3d.jsail.Picking.LinePickSensorObject')
LineProperties = autoclass('org.web3d.x3d.jsail.Shape.LinePropertiesObject')
LineSet = autoclass('org.web3d.x3d.jsail.Rendering.LineSetObject')
LoadSensor = autoclass('org.web3d.x3d.jsail.Networking.LoadSensorObject')
LocalFog = autoclass('org.web3d.x3d.jsail.EnvironmentalEffects.LocalFogObject')
LOD = autoclass('org.web3d.x3d.jsail.Navigation.LODObject')
Material = autoclass('org.web3d.x3d.jsail.Shape.MaterialObject')
Matrix3VertexAttribute = autoclass('org.web3d.x3d.jsail.Shaders.Matrix3VertexAttributeObject')
Matrix4VertexAttribute = autoclass('org.web3d.x3d.jsail.Shaders.Matrix4VertexAttributeObject')
MetadataBoolean = autoclass('org.web3d.x3d.jsail.Core.MetadataBooleanObject')
MetadataDouble = autoclass('org.web3d.x3d.jsail.Core.MetadataDoubleObject')
MetadataFloat = autoclass('org.web3d.x3d.jsail.Core.MetadataFloatObject')
MetadataInteger = autoclass('org.web3d.x3d.jsail.Core.MetadataIntegerObject')
MetadataSet = autoclass('org.web3d.x3d.jsail.Core.MetadataSetObject')
MetadataString = autoclass('org.web3d.x3d.jsail.Core.MetadataStringObject')
MotorJoint = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.MotorJointObject')
MovieTexture = autoclass('org.web3d.x3d.jsail.Texturing.MovieTextureObject')
MultiTexture = autoclass('org.web3d.x3d.jsail.Texturing.MultiTextureObject')
MultiTextureCoordinate = autoclass('org.web3d.x3d.jsail.Texturing.MultiTextureCoordinateObject')
MultiTextureTransform = autoclass('org.web3d.x3d.jsail.Texturing.MultiTextureTransformObject')
NavigationInfo = autoclass('org.web3d.x3d.jsail.Navigation.NavigationInfoObject')
Normal = autoclass('org.web3d.x3d.jsail.Rendering.NormalObject')
NormalInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.NormalInterpolatorObject')
NurbsCurve = autoclass('org.web3d.x3d.jsail.NURBS.NurbsCurveObject')
NurbsCurve2D = autoclass('org.web3d.x3d.jsail.NURBS.NurbsCurve2DObject')
NurbsOrientationInterpolator = autoclass('org.web3d.x3d.jsail.NURBS.NurbsOrientationInterpolatorObject')
NurbsPatchSurface = autoclass('org.web3d.x3d.jsail.NURBS.NurbsPatchSurfaceObject')
NurbsPositionInterpolator = autoclass('org.web3d.x3d.jsail.NURBS.NurbsPositionInterpolatorObject')
NurbsSet = autoclass('org.web3d.x3d.jsail.NURBS.NurbsSetObject')
NurbsSurfaceInterpolator = autoclass('org.web3d.x3d.jsail.NURBS.NurbsSurfaceInterpolatorObject')
NurbsSweptSurface = autoclass('org.web3d.x3d.jsail.NURBS.NurbsSweptSurfaceObject')
NurbsSwungSurface = autoclass('org.web3d.x3d.jsail.NURBS.NurbsSwungSurfaceObject')
NurbsTextureCoordinate = autoclass('org.web3d.x3d.jsail.NURBS.NurbsTextureCoordinateObject')
NurbsTrimmedSurface = autoclass('org.web3d.x3d.jsail.NURBS.NurbsTrimmedSurfaceObject')
OpacityMapVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.OpacityMapVolumeStyleObject')
OrientationChaser = autoclass('org.web3d.x3d.jsail.Followers.OrientationChaserObject')
OrientationDamper = autoclass('org.web3d.x3d.jsail.Followers.OrientationDamperObject')
OrientationInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.OrientationInterpolatorObject')
OrthoViewpoint = autoclass('org.web3d.x3d.jsail.Navigation.OrthoViewpointObject')
PackagedShader = autoclass('org.web3d.x3d.jsail.Shaders.PackagedShaderObject')
ParticleSystem = autoclass('org.web3d.x3d.jsail.ParticleSystems.ParticleSystemObject')
PickableGroup = autoclass('org.web3d.x3d.jsail.Picking.PickableGroupObject')
PixelTexture = autoclass('org.web3d.x3d.jsail.Texturing.PixelTextureObject')
PixelTexture3D = autoclass('org.web3d.x3d.jsail.Texturing3D.PixelTexture3DObject')
PlaneSensor = autoclass('org.web3d.x3d.jsail.PointingDeviceSensor.PlaneSensorObject')
PointEmitter = autoclass('org.web3d.x3d.jsail.ParticleSystems.PointEmitterObject')
PointLight = autoclass('org.web3d.x3d.jsail.Lighting.PointLightObject')
PointPickSensor = autoclass('org.web3d.x3d.jsail.Picking.PointPickSensorObject')
PointProperties = autoclass('org.web3d.x3d.jsail.Shape.PointPropertiesObject')
PointSet = autoclass('org.web3d.x3d.jsail.Rendering.PointSetObject')
Polyline2D = autoclass('org.web3d.x3d.jsail.Geometry2D.Polyline2DObject')
PolylineEmitter = autoclass('org.web3d.x3d.jsail.ParticleSystems.PolylineEmitterObject')
Polypoint2D = autoclass('org.web3d.x3d.jsail.Geometry2D.Polypoint2DObject')
PositionChaser = autoclass('org.web3d.x3d.jsail.Followers.PositionChaserObject')
PositionChaser2D = autoclass('org.web3d.x3d.jsail.Followers.PositionChaser2DObject')
PositionDamper = autoclass('org.web3d.x3d.jsail.Followers.PositionDamperObject')
PositionDamper2D = autoclass('org.web3d.x3d.jsail.Followers.PositionDamper2DObject')
PositionInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.PositionInterpolatorObject')
PositionInterpolator2D = autoclass('org.web3d.x3d.jsail.Interpolation.PositionInterpolator2DObject')
PrimitivePickSensor = autoclass('org.web3d.x3d.jsail.Picking.PrimitivePickSensorObject')
ProgramShader = autoclass('org.web3d.x3d.jsail.Shaders.ProgramShaderObject')
ProjectionVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.ProjectionVolumeStyleObject')
ProtoInstance = autoclass('org.web3d.x3d.jsail.Core.ProtoInstanceObject')
ProximitySensor = autoclass('org.web3d.x3d.jsail.EnvironmentalSensor.ProximitySensorObject')
QuadSet = autoclass('org.web3d.x3d.jsail.CADGeometry.QuadSetObject')
ReceiverPdu = autoclass('org.web3d.x3d.jsail.DIS.ReceiverPduObject')
Rectangle2D = autoclass('org.web3d.x3d.jsail.Geometry2D.Rectangle2DObject')
RigidBody = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.RigidBodyObject')
RigidBodyCollection = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.RigidBodyCollectionObject')
ScalarChaser = autoclass('org.web3d.x3d.jsail.Followers.ScalarChaserObject')
ScalarDamper = autoclass('org.web3d.x3d.jsail.Followers.ScalarDamperObject')
ScalarInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.ScalarInterpolatorObject')
ScreenFontStyle = autoclass('org.web3d.x3d.jsail.Layout.ScreenFontStyleObject')
ScreenGroup = autoclass('org.web3d.x3d.jsail.Layout.ScreenGroupObject')
Script = autoclass('org.web3d.x3d.jsail.Scripting.ScriptObject')
SegmentedVolumeData = autoclass('org.web3d.x3d.jsail.VolumeRendering.SegmentedVolumeDataObject')
ShadedVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.ShadedVolumeStyleObject')
ShaderPart = autoclass('org.web3d.x3d.jsail.Shaders.ShaderPartObject')
ShaderProgram = autoclass('org.web3d.x3d.jsail.Shaders.ShaderProgramObject')
Shape = autoclass('org.web3d.x3d.jsail.Shape.ShapeObject')
SignalPdu = autoclass('org.web3d.x3d.jsail.DIS.SignalPduObject')
SilhouetteEnhancementVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.SilhouetteEnhancementVolumeStyleObject')
SingleAxisHingeJoint = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.SingleAxisHingeJointObject')
SliderJoint = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.SliderJointObject')
Sound = autoclass('org.web3d.x3d.jsail.Sound.SoundObject')
Sphere = autoclass('org.web3d.x3d.jsail.Geometry3D.SphereObject')
SphereSensor = autoclass('org.web3d.x3d.jsail.PointingDeviceSensor.SphereSensorObject')
SplinePositionInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.SplinePositionInterpolatorObject')
SplinePositionInterpolator2D = autoclass('org.web3d.x3d.jsail.Interpolation.SplinePositionInterpolator2DObject')
SplineScalarInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.SplineScalarInterpolatorObject')
SpotLight = autoclass('org.web3d.x3d.jsail.Lighting.SpotLightObject')
SquadOrientationInterpolator = autoclass('org.web3d.x3d.jsail.Interpolation.SquadOrientationInterpolatorObject')
StaticGroup = autoclass('org.web3d.x3d.jsail.Grouping.StaticGroupObject')
StringSensor = autoclass('org.web3d.x3d.jsail.KeyDeviceSensor.StringSensorObject')
SurfaceEmitter = autoclass('org.web3d.x3d.jsail.ParticleSystems.SurfaceEmitterObject')
Switch = autoclass('org.web3d.x3d.jsail.Grouping.SwitchObject')
TexCoordChaser2D = autoclass('org.web3d.x3d.jsail.Followers.TexCoordChaser2DObject')
TexCoordDamper2D = autoclass('org.web3d.x3d.jsail.Followers.TexCoordDamper2DObject')
Text = autoclass('org.web3d.x3d.jsail.Text.TextObject')
TextureBackground = autoclass('org.web3d.x3d.jsail.EnvironmentalEffects.TextureBackgroundObject')
TextureCoordinate = autoclass('org.web3d.x3d.jsail.Texturing.TextureCoordinateObject')
TextureCoordinate3D = autoclass('org.web3d.x3d.jsail.Texturing3D.TextureCoordinate3DObject')
TextureCoordinate4D = autoclass('org.web3d.x3d.jsail.Texturing3D.TextureCoordinate4DObject')
TextureCoordinateGenerator = autoclass('org.web3d.x3d.jsail.Texturing.TextureCoordinateGeneratorObject')
TextureProperties = autoclass('org.web3d.x3d.jsail.Texturing.TexturePropertiesObject')
TextureTransform = autoclass('org.web3d.x3d.jsail.Texturing.TextureTransformObject')
TextureTransform3D = autoclass('org.web3d.x3d.jsail.Texturing3D.TextureTransform3DObject')
TextureTransformMatrix3D = autoclass('org.web3d.x3d.jsail.Texturing3D.TextureTransformMatrix3DObject')
TimeSensor = autoclass('org.web3d.x3d.jsail.Time.TimeSensorObject')
TimeTrigger = autoclass('org.web3d.x3d.jsail.EventUtilities.TimeTriggerObject')
ToneMappedVolumeStyle = autoclass('org.web3d.x3d.jsail.VolumeRendering.ToneMappedVolumeStyleObject')
TouchSensor = autoclass('org.web3d.x3d.jsail.PointingDeviceSensor.TouchSensorObject')
Transform = autoclass('org.web3d.x3d.jsail.Grouping.TransformObject')
TransformSensor = autoclass('org.web3d.x3d.jsail.EnvironmentalSensor.TransformSensorObject')
TransmitterPdu = autoclass('org.web3d.x3d.jsail.DIS.TransmitterPduObject')
TriangleFanSet = autoclass('org.web3d.x3d.jsail.Rendering.TriangleFanSetObject')
TriangleSet = autoclass('org.web3d.x3d.jsail.Rendering.TriangleSetObject')
TriangleSet2D = autoclass('org.web3d.x3d.jsail.Geometry2D.TriangleSet2DObject')
TriangleStripSet = autoclass('org.web3d.x3d.jsail.Rendering.TriangleStripSetObject')
TwoSidedMaterial = autoclass('org.web3d.x3d.jsail.Shape.TwoSidedMaterialObject')
UniversalJoint = autoclass('org.web3d.x3d.jsail.RigidBodyPhysics.UniversalJointObject')
Viewpoint = autoclass('org.web3d.x3d.jsail.Navigation.ViewpointObject')
ViewpointGroup = autoclass('org.web3d.x3d.jsail.Navigation.ViewpointGroupObject')
Viewport = autoclass('org.web3d.x3d.jsail.Layering.ViewportObject')
VisibilitySensor = autoclass('org.web3d.x3d.jsail.EnvironmentalSensor.VisibilitySensorObject')
VolumeData = autoclass('org.web3d.x3d.jsail.VolumeRendering.VolumeDataObject')
VolumeEmitter = autoclass('org.web3d.x3d.jsail.ParticleSystems.VolumeEmitterObject')
VolumePickSensor = autoclass('org.web3d.x3d.jsail.Picking.VolumePickSensorObject')
WindPhysicsModel = autoclass('org.web3d.x3d.jsail.ParticleSystems.WindPhysicsModelObject')
WorldInfo = autoclass('org.web3d.x3d.jsail.Core.WorldInfoObject')
component = autoclass('org.web3d.x3d.jsail.Core.componentObject')
connect = autoclass('org.web3d.x3d.jsail.Core.connectObject')
EXPORT = autoclass('org.web3d.x3d.jsail.Networking.EXPORTObject')
ExternProtoDeclare = autoclass('org.web3d.x3d.jsail.Core.ExternProtoDeclareObject')
field = autoclass('org.web3d.x3d.jsail.Core.fieldObject')
fieldValue = autoclass('org.web3d.x3d.jsail.Core.fieldValueObject')
head = autoclass('org.web3d.x3d.jsail.Core.headObject')
IMPORT = autoclass('org.web3d.x3d.jsail.Networking.IMPORTObject')
IS = autoclass('org.web3d.x3d.jsail.Core.ISObject')
meta = autoclass('org.web3d.x3d.jsail.Core.metaObject')
ProtoBody = autoclass('org.web3d.x3d.jsail.Core.ProtoBodyObject')
ProtoDeclare = autoclass('org.web3d.x3d.jsail.Core.ProtoDeclareObject')
ProtoInterface = autoclass('org.web3d.x3d.jsail.Core.ProtoInterfaceObject')
ROUTE = autoclass('org.web3d.x3d.jsail.Core.ROUTEObject')
Scene = autoclass('org.web3d.x3d.jsail.Core.SceneObject')
unit = autoclass('org.web3d.x3d.jsail.Core.unitObject')
X3D = autoclass('org.web3d.x3d.jsail.Core.X3DObject')
SFBool = autoclass('org.web3d.x3d.jsail.fields.SFBoolObject')
MFBool = autoclass('org.web3d.x3d.jsail.fields.MFBoolObject')
SFColor = autoclass('org.web3d.x3d.jsail.fields.SFColorObject')
MFColor = autoclass('org.web3d.x3d.jsail.fields.MFColorObject')
SFColorRGBA = autoclass('org.web3d.x3d.jsail.fields.SFColorRGBAObject')
MFColorRGBA = autoclass('org.web3d.x3d.jsail.fields.MFColorRGBAObject')
SFDouble = autoclass('org.web3d.x3d.jsail.fields.SFDoubleObject')
MFDouble = autoclass('org.web3d.x3d.jsail.fields.MFDoubleObject')
SFFloat = autoclass('org.web3d.x3d.jsail.fields.SFFloatObject')
MFFloat = autoclass('org.web3d.x3d.jsail.fields.MFFloatObject')
SFImage = autoclass('org.web3d.x3d.jsail.fields.SFImageObject')
MFImage = autoclass('org.web3d.x3d.jsail.fields.MFImageObject')
SFInt32 = autoclass('org.web3d.x3d.jsail.fields.SFInt32Object')
MFInt32 = autoclass('org.web3d.x3d.jsail.fields.MFInt32Object')
SFMatrix3d = autoclass('org.web3d.x3d.jsail.fields.SFMatrix3dObject')
MFMatrix3d = autoclass('org.web3d.x3d.jsail.fields.MFMatrix3dObject')
SFMatrix3f = autoclass('org.web3d.x3d.jsail.fields.SFMatrix3fObject')
MFMatrix3f = autoclass('org.web3d.x3d.jsail.fields.MFMatrix3fObject')
SFMatrix4d = autoclass('org.web3d.x3d.jsail.fields.SFMatrix4dObject')
MFMatrix4d = autoclass('org.web3d.x3d.jsail.fields.MFMatrix4dObject')
SFMatrix4f = autoclass('org.web3d.x3d.jsail.fields.SFMatrix4fObject')
MFMatrix4f = autoclass('org.web3d.x3d.jsail.fields.MFMatrix4fObject')
SFString = autoclass('org.web3d.x3d.jsail.fields.SFStringObject')
SFNode = autoclass('org.web3d.x3d.jsail.fields.SFNodeObject')
MFNode = autoclass('org.web3d.x3d.jsail.fields.MFNodeObject')
SFRotation = autoclass('org.web3d.x3d.jsail.fields.SFRotationObject')
MFRotation = autoclass('org.web3d.x3d.jsail.fields.MFRotationObject')
MFString = autoclass('org.web3d.x3d.jsail.fields.MFStringObject')
SFTime = autoclass('org.web3d.x3d.jsail.fields.SFTimeObject')
MFTime = autoclass('org.web3d.x3d.jsail.fields.MFTimeObject')
SFVec2d = autoclass('org.web3d.x3d.jsail.fields.SFVec2dObject')
MFVec2d = autoclass('org.web3d.x3d.jsail.fields.MFVec2dObject')
SFVec2f = autoclass('org.web3d.x3d.jsail.fields.SFVec2fObject')
MFVec2f = autoclass('org.web3d.x3d.jsail.fields.MFVec2fObject')
SFVec3d = autoclass('org.web3d.x3d.jsail.fields.SFVec3dObject')
MFVec3d = autoclass('org.web3d.x3d.jsail.fields.MFVec3dObject')
SFVec3f = autoclass('org.web3d.x3d.jsail.fields.SFVec3fObject')
MFVec3f = autoclass('org.web3d.x3d.jsail.fields.MFVec3fObject')
SFVec4d = autoclass('org.web3d.x3d.jsail.fields.SFVec4dObject')
MFVec4d = autoclass('org.web3d.x3d.jsail.fields.MFVec4dObject')
SFVec4f = autoclass('org.web3d.x3d.jsail.fields.SFVec4fObject')
MFVec4f = autoclass('org.web3d.x3d.jsail.fields.MFVec4fObject')
colorMaterial = Material().setDiffuseColor(SFColor(1,0,0))
texture = ImageTexture().setUrl('./files/JoeBodyTexture29.png')
X3D0 = (X3D().setProfile(SFString("Interchange")).setVersion(SFString("3.3"))
.setHead(head()
.addMeta(meta().setName(SFString("title")).setContent(SFString("sphere.x3d")))
.addMeta(meta().setName(SFString("creator")).setContent(SFString("John Carlson")))
.addMeta(meta().setName(SFString("generator")).setContent(SFString("manual")))
.addMeta(meta().setName(SFString("identifier")).setContent(SFString("https://coderextreme.net/X3DJSONLD/sphere.x3d")))
.addMeta(meta().setName(SFString("description")).setContent(SFString("a sphere"))))
.setScene(Scene()
.addChild(Group()
.addChild(Shape()
.setAppearance(Appearance()
.setMaterial(colorMaterial)
.setTexture(texture))
.setGeometry(Sphere().setRadius(SFFloat(1)))))))
x3domHTML = '''<script type='text/javascript' src='https://www.x3dom.org/download/dev/x3dom-full.debug.js'> </script>
<link rel='stylesheet' type='text/css' href='https://www.x3dom.org/download/dev/x3dom.css'></link>'''
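# patch the serialized scene: give each ImageTexture a crossOrigin='anonymous' attribute so the browser can fetch the texture cross-origin for WebGL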
xml = X3D0.toStringX3D().replace("ImageTexture", "ImageTexture crossOrigin='anonymous'")
import xml.etree.ElementTree as ET
x3domXML = ET.tostring(ET.XML(xml), encoding="unicode", short_empty_elements=False)
```
## Switch from non-trusted to trusted to allow javascript
```
from IPython.display import HTML
colorMaterial.setDiffuseColor(SFColor(0,1,0))
```
## Render X3D with x3dom
```
HTML(x3domHTML + x3domXML)
x3dfile = 'sphere.x3d'
texture = ImageTexture().setUrl('./JoeBodyTexture29.png')
```
Rerun the X3D0 cell above so the scene picks up the new texture URL
```
javax3dfilehandle = X3D0.toFileX3D(x3dfile)
```
## Try Inline with x3dom
```
inlineScene = f'''
<X3D>
<Scene>
<Inline url='"./files/{ x3dfile }"'></Inline>
</Scene>
</X3D>
'''
HTML(x3domHTML+inlineScene)
```
## Try X_ITE
```
x_iteHTML = '''
<link rel="stylesheet" type="text/css" href="https://code.create3000.de/x_ite/4.5.2/dist/x_ite.css"/>
<script type="text/javascript" src="https://code.create3000.de/x_ite/4.5.2/dist/x_ite.js"></script>
<style type="text/css">
X3DCanvas {
width: 768px;
height: 432px;
}
</style>
'''
x_iteSphere = f'''
<X3DCanvas src="https://gist.githubusercontent.com/andreasplesch/15ca27439fde2a6a721fbc36d667ea15/raw/3ed8dab387d699fd8780091f1c71019073223c94/AAA_PlaneSensor1DOnEdge.x3d">
<p>Your browser may not support all features required by X_ITE.
For a better experience, keep your browser up to date.
<a href="http://outdatedbrowser.com">Check here for latest versions.</a></p>
</X3DCanvas>
<script>
fileURL = document.location + "END";
fileURL = fileURL.replace("/labEND","/files/") + "{ x3dfile }" ;
//document.querySelector('X3DCanvas').setAttribute("src", fileURL)
</script>
'''
x_iteSphere
HTML(x_iteHTML + x_iteSphere)
X3D0.toFileX_ITE('https://gist.githubusercontent.com/andreasplesch/15ca27439fde2a6a721fbc36d667ea15/raw/3ed8dab387d699fd8780091f1c71019073223c94/AAA_PlaneSensor1DOnEdge.x3d', 'x_ite.html')
from IPython.display import IFrame
IFrame("x_ite.html",600,400)
IFrame("https://gist.githack.com/andreasplesch/6710c545312428d3cd95992e5b3d47b1/raw/2425ab0ab809eea436c175dd74183e4d46bcfe3c/index.html",600,400)
```
# Model Selection, Underfitting and Overfitting
:label:`chapter_model_selection`
As machine learning scientists, our goal is to discover general patterns.
Say, for example, that we wish to learn the pattern
that associates genetic markers with the development of dementia in adulthood.
It's easy enough to memorize our training set.
Each person's genes uniquely identify them,
not just among people represented in our dataset,
but among all people on earth!
Given the genetic markers representing some person,
we don't want our model to simply recognize "oh, that's Bob",
and then output the classification,
say among {*dementia*, *mild cognitive impairment*, *healthy*},
that corresponds to Bob.
Rather, our goal is to discover patterns
that capture regularities in the underlying population
from which our training set was drawn.
If we are successful in this endeavor,
then we could successfully assess risk
even for individuals that we have never encountered before.
This problem---how to discover patterns that *generalize*---is
the fundamental problem of machine learning.
The danger is that when we train models,
we access just a small sample of data.
The largest public image datasets contain roughly one million images.
And more often we have to learn from thousands or tens of thousands.
In a large hospital system we might access
hundreds of thousands of medical records.
With finite samples, we always run the risk
that we might discover *apparent* associations
that turn out not to hold up when we collect more data.
Let’s consider an extreme pathological case.
Imagine that you want to learn to predict
which people will repay their loans.
A lender hires you as a data scientist to investigate,
handing over the complete files on 100 applicants,
5 of which defaulted on their loans within 3 years.
Realistically, the files might include hundreds of potential features, including income, occupation, credit score, length of employment etc.
Moreover, say that they additionally hand over video footage
of each applicant's interview with their lending agent.
Now suppose that after featurizing the data into an enormous design matrix,
you discover that of the 5 applicants who default,
all of them were wearing blue shirts during their interviews,
while only 40% of the general population wore blue shirts.
There's a good chance that if you train a predictive model
to predict default, it might rely upon blue-shirt-wearing
as an important feature.
Even if in fact defaulters were no more likely to wear blue shirts
than people in the general population,
there’s a $0.4^5 \approx 0.01$ probability that
we would observe all five defaulters wearing blue shirts.
With just $5$ positive examples of defaults
and hundreds or thousands of features,
we would probably find a large number of features
that appear to be perfectly predictive of our label
just due to random chance.
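To see how easily this can happen, the following minimal simulation (an illustrative sketch, not part of the original text; the 2,000 candidate features and the 40% base rate are assumed numbers) counts how many random, meaningless binary features happen to cover all five defaulters by chance alone.

```
import numpy as np

rng = np.random.default_rng(0)
n_applicants, n_defaulters, n_features = 100, 5, 2000

labels = np.zeros(n_applicants, dtype=bool)
labels[:n_defaulters] = True  # the five applicants who defaulted

# random binary features, each present with probability 0.4 (like the blue shirts)
features = rng.random((n_applicants, n_features)) < 0.4

# a feature "looks perfectly predictive" if every defaulter happens to have it
spurious = int(features[labels].all(axis=0).sum())
print(spurious, "of", n_features, "random features cover all 5 defaulters")
# we expect about 0.4**5 * 2000, i.e. roughly 20 such features, from chance alone
```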
With an unlimited amount of data, we would expect
these *spurious* associations to eventually disappear.
But we seldom have that luxury.
The phenomenon of fitting our training data
more closely than we fit the underlying distribution is called *overfitting*, and the techniques used to combat overfitting are called *regularization*.
In the previous sections, you might have observed
this effect while experimenting with the Fashion-MNIST dataset.
If you altered the model structure or the hyper-parameters during the experiment, you might have noticed that with enough nodes, layers, and training epochs, the model can eventually reach perfect accuracy on the training set, even as the accuracy on test data deteriorates.
## Training Error and Generalization Error
In order to discuss this phenomenon more formally,
we need to differentiate between *training error* and *generalization error*.
The training error is the error of our model
as calculated on the training data set,
while generalization error is the expectation of our model's error
were we to apply it to an infinite stream of additional data points
drawn from the same underlying data distribution as our original sample.
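Written out (a notational aside, not from the original text), for a model $f$, a loss function $l$, a training set $\{(\mathbf{x}_i, y_i)\}_{i=1}^n$, and an underlying data distribution $P(\mathbf{x}, y)$, the two quantities are

$$\text{training error} = \frac{1}{n}\sum_{i=1}^{n} l\big(f(\mathbf{x}_i), y_i\big), \qquad \text{generalization error} = E_{(\mathbf{x}, y) \sim P}\big[l\big(f(\mathbf{x}), y\big)\big].$$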
Problematically, *we can never calculate the generalization error exactly*.
That's because the imaginary stream of infinite data is an imaginary object.
In practice, we must *estimate* the generalization error
by applying our model to an independent test set
constituted of a random selection of data points
that were withheld from our training set.
The following three thought experiments
will help illustrate this situation better.
Consider a college student trying to prepare for her final exam.
A diligent student will strive to practice well
and test her abilities using exams from previous years.
Nonetheless, doing well on past exams is no guarantee
that she will excel when it matters.
For instance, the student might try to prepare
by rote learning the answers to the exam questions.
This requires the student to memorize many things.
She might even remember the answers for past exams perfectly.
Another student might prepare by trying to understand
the reasons for giving certain answers.
In most cases, the latter student will do much better.
Likewise, consider a model that simply uses a lookup table to answer questions. If the set of allowable inputs is discrete and reasonably small, then perhaps after viewing *many* training examples, this approach would perform well. Still this model has no ability to do better than random guessing when faced with examples that it has never seen before.
In reality the input spaces are far too large to memorize the answers corresponding to every conceivable input. For example, consider the black and white $28\times28$ images. If each pixel can take one among $256$ gray scale values, then there are $256^{784}$ possible images. That means that there are far more low-res grayscale thumbnail-sized images than there are atoms in the universe. Even if we could encounter this data, we could never afford to store the lookup table.
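For a sense of scale, the count is easy to check (a small aside, not from the original text):

```
import math
# number of decimal digits in 256**784, the count of distinct 28x28 8-bit grayscale images
print(int(784 * math.log10(256)) + 1)  # 1889 digits, i.e. roughly 10**1888 images
# compare with roughly 10**80 atoms in the observable universe
```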
Lastly, consider the problem of trying
to classify the outcomes of coin tosses (class 0: heads, class 1: tails)
based on some contextual features that might be available.
No matter what algorithm we come up with,
the generalization error will always be $\frac{1}{2}$.
However, for most algorithms,
we should expect our training error to be considerably lower,
depending on the luck of the draw,
even if we didn't have any features!
Consider the dataset {0, 1, 1, 1, 0, 1}.
Our feature-less algorithm would have to fall back on always predicting
the *majority class*, which appears from our limited sample to be *1*.
In this case, the model that always predicts class 1
will incur an error of $\frac{1}{3}$,
considerably better than our generalization error.
As we increase the amount of data,
the probability that the fraction of heads
will deviate significantly from $\frac{1}{2}$ diminishes,
and our training error would come to match the generalization error.
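A short simulation makes this concrete (a minimal sketch, not from the original text; the sample sizes are arbitrary):

```
import numpy as np

rng = np.random.default_rng(42)
for n in (6, 100, 10000, 1000000):
    tosses = rng.integers(0, 2, size=n)        # fair coin: 0 = heads, 1 = tails
    majority = int(tosses.mean() >= 0.5)       # the feature-less model: always predict the majority class
    train_error = float(np.mean(tosses != majority))  # its error on the very sample it was "fit" to
    print(n, round(train_error, 3))            # approaches the generalization error of 0.5 as n grows
```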
### Statistical Learning Theory
Since generalization is the fundamental problem in machine learning,
you might not be surprised to learn
that many mathematicians and theorists have dedicated their lives
to developing formal theories to describe this phenomenon.
In their [eponymous theorem](https://en.wikipedia.org/wiki/Glivenko%E2%80%93Cantelli_theorem), Glivenko and Cantelli
derived the rate at which the training error
converges to the generalization error.
In a series of seminal papers, [Vapnik and Chervonenkis](https://en.wikipedia.org/wiki/Vapnik%E2%80%93Chervonenkis_theory)
extended this theory to more general classes of functions.
This work laid the foundations of [Statistical Learning Theory](https://en.wikipedia.org/wiki/Statistical_learning_theory).
In the **standard supervised learning setting**, which we have addressed up until now and will stick with throughout most of this book,
we assume that both the training data and the test data
are drawn *independently* from *identical* distributions
(commonly called the i.i.d. assumption).
This means that the process that samples our data has no *memory*.
The 2nd example drawn and the 3rd drawn
are no more correlated than the 2nd and the 2-millionth sample drawn.
Being a good machine learning scientist requires thinking critically,
and already you should be poking holes in this assumption,
coming up with common cases where the assumption fails.
What if we train a mortality risk predictor
on data collected from patients at UCSF,
and apply it on patients at Massachusetts General Hospital?
These distributions are simply not identical.
Moreover, draws might be correlated in time.
What if we are classifying the topics of Tweets?
The news cycle would create temporal dependencies
in the topics being discussed, violating any assumptions of independence.
Sometimes we can get away with minor violations of the i.i.d. assumption
and our models will continue to work remarkably well.
After all, nearly every real-world application
involves at least some minor violation of the i.i.d. assumption,
and yet we have useful tools for face recognition,
speech recognition, language translation, etc.
Other violations are sure to cause trouble.
Imagine, for example, if we tried to train
a face recognition system by training it
exclusively on university students
and then want to deploy it as a tool
for monitoring geriatrics in a nursing home population.
This is unlikely to work well since college students
tend to look considerably different from the elderly.
In subsequent chapters and volumes, we will discuss problems
arising from violations of the i.i.d. assumption.
For now, even taking the i.i.d. assumption for granted,
understanding generalization is a formidable problem.
Moreover, elucidating the precise theoretical foundations
that might explain why deep neural networks generalize as well as they do
continues to vex the greatest minds in learning theory.
When we train our models, we attempt to search for a function
that fits the training data as well as possible.
If the function is so flexible that it can catch on to spurious patterns
just as easily as to the true associations,
then it might perform *too well* without producing a model
that generalizes well to unseen data.
This is precisely what we want to avoid (or at least control).
Many of the techniques in deep learning are heuristics and tricks
aimed at guarding against overfitting.
### Model Complexity
When we have simple models and abundant data,
we expect the generalization error to resemble the training error.
When we work with more complex models and fewer examples,
we expect the training error to go down but the generalization gap to grow.
What precisely constitutes model complexity is a complex matter.
Many factors govern whether a model will generalize well.
For example a model with more parameters might be considered more complex.
A model whose parameters can take a wider range of values
might be more complex.
Often with neural networks, we think of a model
that takes more training steps as more complex,
and one subject to *early stopping* as less complex.
It can be difficult to compare the complexity among members
of substantially different model classes
(say a decision tree versus a neural network).
For now, a simple rule of thumb is quite useful:
A model that can readily explain arbitrary facts
is what statisticians view as complex,
whereas one that has only a limited expressive power
but still manages to explain the data well
is probably closer to the truth.
In philosophy, this is closely related to Popper’s
criterion of [falsifiability](https://en.wikipedia.org/wiki/Falsifiability)
of a scientific theory: a theory is good if it fits data
and if there are specific tests which can be used to disprove it.
This is important since all statistical estimation is
[post hoc](https://en.wikipedia.org/wiki/Post_hoc),
i.e. we estimate after we observe the facts,
hence vulnerable to the associated fallacy.
For now, we'll put the philosophy aside and stick to more tangible issues.
In this chapter, to give you some intuition,
we’ll focus on a few factors that tend
to influence the generalizability of a model class:
1. The number of tunable parameters. When the number of tunable parameters, sometimes called the *degrees of freedom*, is large, models tend to be more susceptible to overfitting (see the short sketch after this list).
1. The values taken by the parameters. When weights can take a wider range of values, models can be more susceptible to overfitting.
1. The number of training examples. It’s trivially easy to overfit a dataset containing only one or two examples even if your model is simple. But overfitting a dataset with millions of examples requires an extremely flexible model.
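As a rough illustration of the first and third factors (a toy sketch, not from the original text; the cubic ground truth, the noise level, and the 20-point training set are arbitrary choices), fitting polynomials of increasing degree to a small noisy dataset typically drives the training error down while the validation error grows:

```
import numpy as np

rng = np.random.default_rng(0)

# toy data: a cubic trend plus noise, split into 20 training and 20 validation points
x = rng.uniform(-3, 3, size=40)
y = x ** 3 - 2 * x + rng.normal(scale=2.0, size=x.shape)
x_tr, y_tr, x_va, y_va = x[:20], y[:20], x[20:], y[20:]

for degree in (1, 3, 10):  # more coefficients = more tunable parameters
    coeffs = np.polyfit(x_tr, y_tr, degree)
    mse = lambda xs, ys: float(np.mean((np.polyval(coeffs, xs) - ys) ** 2))
    print(degree, round(mse(x_tr, y_tr), 2), round(mse(x_va, y_va), 2))
```

With only 20 training examples the high-degree fit can chase the noise; given many more examples, the same model class would be much harder to overfit.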
## Model Selection
In machine learning, we usually select our final model
after evaluating several candidate models.
This process is called model selection.
Sometimes the models subject to comparison
are fundamentally different in nature
(say, decision trees vs linear models).
At other times, we are comparing
members of the same class of models
that have been trained with different hyperparameter settings.
With multilayer perceptrons for example,
we may wish to compare models with
different numbers of hidden layers,
different numbers of hidden units,
and various choices of the activation functions
applied to each hidden layer.
In order to determine the best among our candidate models,
we will typically employ a validation set.
### Validation Data Set
In principle we should not touch our test set
until after we have chosen all our hyper-parameters.
Were we to use the test data in the model selection process,
there's a risk that we might overfit the test data.
Then we would be in serious trouble.
If we overfit our training data,
there's always the evaluation on test data to keep us honest.
But if we overfit the test data, how would we ever know?
Thus, we should never rely on the test data for model selection.
And yet we cannot rely solely on the training data
for model selection either because
we cannot estimate the generalization error
on the very data that we use to train the model.
The common practice to address this problem
is to split our data three ways,
incorporating a *validation set*
in addition to the training and test sets.
In practical applications, the picture gets muddier.
While ideally we would only touch the test data once,
to assess the very best model or to compare
a small number of models to each other,
real-world test data is seldom discarded after just one use.
We can seldom afford a new test set for each round of experiments.
The result is a murky practice where the boundaries
between validation and test data are worryingly ambiguous.
Unless explicitly stated otherwise, in the experiments in this book
we are really working with what should rightly be called
training data and validation data, with no true test sets.
Therefore, the accuracy reported in each experiment
is really the validation accuracy and not a true test set accuracy.
The good news is that we don't need too much data in the validation set.
The uncertainty in our estimates can be shown
to be of the order of $O(n^{-\frac{1}{2}})$.
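To make this concrete, a rough back-of-the-envelope sketch (an addition to the text, assuming each validation prediction behaves like an independent Bernoulli trial) of the standard error of a validation accuracy estimate:
```
import math

def accuracy_standard_error(accuracy, n):
    """Rough standard error of an accuracy estimate computed on n validation examples,
    treating each prediction as an independent Bernoulli trial."""
    return math.sqrt(accuracy * (1 - accuracy) / n)

# With 10,000 validation examples and ~90% accuracy, the estimate is already
# accurate to roughly +/- 0.3 percentage points (one standard error).
print(accuracy_standard_error(0.9, 10000))
```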
### $K$-Fold Cross-Validation
When training data is scarce,
we might not even be able to afford to hold out
enough data to constitute a proper validation set.
One popular solution to this problem is to employ
$K$*-fold cross-validation*.
Here, the original training data is split into $K$ non-overlapping subsets.
Then model training and validation are executed $K$ times,
each time training on $K-1$ subsets and validating
on a different subset (the one not used for training in that round).
Finally, the training and validation error rates are estimated
by averaging over the results from the $K$ experiments.
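As a concrete sketch (added here for illustration, not part of the original text), the fold bookkeeping can be written in a few lines of plain NumPy; `k_fold_cv` and the `train_and_eval` callback are names introduced only for this example:
```
import numpy as np

def k_fold_cv(X, y, k, train_and_eval):
    """Average the validation error over k non-overlapping folds.
    train_and_eval(X_train, y_train, X_val, y_val) must return a scalar error;
    X and y are assumed to be NumPy arrays."""
    folds = np.array_split(np.random.permutation(len(X)), k)
    errors = []
    for i in range(k):
        val_idx = folds[i]
        train_idx = np.concatenate([folds[j] for j in range(k) if j != i])
        errors.append(train_and_eval(X[train_idx], y[train_idx], X[val_idx], y[val_idx]))
    return float(np.mean(errors))

# Tiny demo with a placeholder evaluator (replace with real training code)
X_demo = np.arange(20.0).reshape(10, 2)
y_demo = np.arange(10) % 2
print(k_fold_cv(X_demo, y_demo, k=5,
                train_and_eval=lambda Xt, yt, Xv, yv: float(np.mean(yv))))
```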
## Underfitting or Overfitting?
When we compare the training and validation errors,
we want to be mindful of two common situations:
First, we want to watch out for cases
when our training error and validation error are both substantial
but there is only a small gap between them.
If the model is unable to reduce the training error,
that could mean that our model is too simple
(i.e., insufficiently expressive)
to capture the pattern that we are trying to model.
Moreover, since the *generalization gap*
between our training and validation errors is small,
we have reason to believe that we could get away with a more complex model.
This phenomenon is known as underfitting.
On the other hand, as we discussed above,
we want to watch out for the cases
when our training error is significantly lower
than our validation error, indicating severe overfitting.
Note that overfitting is not always a bad thing.
With deep learning especially, it's well known
that the best predictive models often perform
far better on training data than on holdout data.
Ultimately, we usually care more about the validation error
than about the gap between the training and validation errors.
Whether we overfit or underfit can depend
both on the complexity of our model
and the size of the available training datasets,
two topics that we discuss below.
### Model Complexity
To illustrate some classical intuition
about overfitting and model complexity,
we give an example using polynomials.
Given training data consisting of a single feature $x$
and a corresponding real-valued label $y$,
we try to find the polynomial of degree $d$
$$\hat{y}= \sum_{i=0}^d x^i w_i$$
to estimate the labels $y$.
This is just a linear regression problem
where our features are given by the powers of $x$,
the $w_i$ are the model’s weights,
and the bias is given by $w_0$ since $x^0 = 1$ for all $x$.
Since this is just a linear regression problem,
we can use the squared error as our loss function.
A higher-order polynomial function is more complex
than a lower order polynomial function,
since the higher-order polynomial has more parameters
and the model function’s selection range is wider.
Fixing the training data set,
higher-order polynomial functions should always
achieve lower (at worst, equal) training error
relative to lower degree polynomials.
In fact, whenever the data points each have a distinct value of $x$,
a polynomial function with degree equal to the number of data points
can fit the training set perfectly.
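For instance (a small illustration added here, not from the original text), `numpy.polyfit` with a high enough degree reproduces a handful of training points essentially exactly:
```
import numpy as np

# Five training points with distinct x values
x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.sin(x) + 0.1 * np.random.randn(5)

# A degree-4 polynomial (one less than the number of points) interpolates them,
# so the training error is essentially zero -- a textbook case of overfitting.
coeffs = np.polyfit(x, y, deg=len(x) - 1)
print(np.max(np.abs(np.polyval(coeffs, x) - y)))  # close to machine precision
```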
We visualize the relationship between polynomial degree
and under- vs over-fitting below.

### Data Set Size
The other big consideration to bear in mind is the dataset size.
Fixing our model, the fewer samples we have in the training dataset,
the more likely (and more severely) we are to encounter overfitting.
As we increase the amount of training data,
the generalization error typically decreases.
Moreover, in general, more data never hurts.
For a fixed task and data *distribution*,
there is typically a relationship between model complexity and dataset size.
Given more data, we might profitably attempt to fit a more complex model.
Absent sufficient data, simpler models may be difficult to beat.
For many tasks, deep learning only outperforms linear models
when many thousands of training examples are available.
In part, the current success of deep learning
owes to the current abundance of massive datasets
due to internet companies, cheap storage, connected devices,
and the broad digitization of the economy.
## Polynomial Regression
We can now explore these concepts interactively
by fitting polynomials to data.
To get started we'll import our usual packages.
```
import d2l
from mxnet import autograd, gluon, np, npx
from mxnet.gluon import nn
npx.set_np()
```
### Generating Data Sets
First we need data. Given $x$, we will use the following cubic polynomial to generate the labels on training and test data:
$$y = 5 + 1.2x - 3.4\frac{x^2}{2!} + 5.6 \frac{x^3}{3!} + \epsilon \text{ where }
\epsilon \sim \mathcal{N}(0,0.1)$$
The noise term $\epsilon$ obeys a normal distribution
with a mean of 0 and a standard deviation of 0.1.
We'll synthesize 100 samples each for the training set and test set.
```
maxdegree = 20 # Maximum degree of the polynomial
n_train, n_test = 100, 100 # Training and test data set sizes
true_w = np.zeros(maxdegree) # Allocate lots of empty space
true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])
features = np.random.normal(size=(n_train + n_test, 1))
np.random.shuffle(features)  # shuffles in place; np.random.shuffle returns None
poly_features = np.power(features, np.arange(maxdegree).reshape(1, -1))
poly_features = poly_features / (
npx.gamma(np.arange(maxdegree) + 1).reshape(1, -1))
labels = np.dot(poly_features, true_w)
labels += np.random.normal(scale=0.1, size=labels.shape)
```
For optimization, we typically want to avoid
very large values of gradients, losses, etc.
This is why the monomials stored in `poly_features`
are rescaled from $x^i$ to $\frac{1}{i!} x^i$.
It allows us to avoid very large values for large exponents $i$.
Factorials are implemented in Gluon using the Gamma function,
where $n! = \Gamma(n+1)$.
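As a quick sanity check (added for illustration, using the Python standard library rather than Gluon), the Gamma function indeed generalizes the factorial:
```
import math

# Gamma(n + 1) equals n! for non-negative integers n
for n in range(5):
    print(n, math.gamma(n + 1), math.factorial(n))
```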
Take a look at the first 2 samples from the generated data set.
The value 1 is technically a feature,
namely the constant feature corresponding to the bias.
```
features[:2], poly_features[:2], labels[:2]
```
### Defining, Training and Testing Model
Let's first implement a function to evaluate the loss on a given dataset.
```
# Save to the d2l package.
def evaluate_loss(net, data_iter, loss):
"""Evaluate the loss of a model on the given dataset"""
metric = d2l.Accumulator(2) # sum_loss, num_examples
for X, y in data_iter:
metric.add(loss(net(X), y).sum(), y.size)
return metric[0] / metric[1]
```
Now define the training function.
```
def train(train_features, test_features, train_labels, test_labels,
num_epochs=1000):
loss = gluon.loss.L2Loss()
net = nn.Sequential()
# Switch off the bias since we already catered for it in the polynomial
# features
net.add(nn.Dense(1, use_bias=False))
net.initialize()
batch_size = min(10, train_labels.shape[0])
train_iter = d2l.load_array((train_features, train_labels), batch_size)
test_iter = d2l.load_array((test_features, test_labels), batch_size,
is_train=False)
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': 0.01})
animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',
xlim=[1,num_epochs], ylim=[1e-3, 1e2],
legend=['train', 'test'])
for epoch in range(1, num_epochs+1):
d2l.train_epoch_ch3(net, train_iter, loss, trainer)
if epoch % 50 == 0:
animator.add(epoch, (evaluate_loss(net, train_iter, loss),
evaluate_loss(net, test_iter, loss)))
print('weight:', net[0].weight.data().asnumpy())
```
### Third-order Polynomial Function Fitting (Normal)
We will begin by first using a third-order polynomial function
with the same order as the data generation function.
The results show that both the training error
and the error on the test data are low.
The trained model parameters are also close
to the true values $w = [5, 1.2, -3.4, 5.6]$.
```
# Pick the first four dimensions, i.e. 1, x, x^2, x^3 from the polynomial
# features
train(poly_features[:n_train, 0:4], poly_features[n_train:, 0:4],
labels[:n_train], labels[n_train:])
```
### Linear Function Fitting (Underfitting)
Let’s take another look at linear function fitting.
After the decline in the early epoch,
it becomes difficult to further decrease
this model’s training error rate.
After the last epoch iteration has been completed,
the training error rate is still high.
When used to fit non-linear patterns
(like the third-order polynomial function here)
linear models are liable to underfit.
```
# Pick the first two dimensions, i.e. 1, x from the polynomial features
train(poly_features[:n_train, 0:2], poly_features[n_train:, 0:2],
labels[:n_train], labels[n_train:])
```
### Insufficient Training (Overfitting)
Now let's try to train the model
using a polynomial of too high degree.
Here, there is insufficient data to learn that
the higher-degree coefficients should have values close to zero.
As a result, our overly-complex model
is far too susceptible to being influenced
by noise in the training data.
Of course, our training error will now be low
(even lower than if we had the right model!)
but our test error will be high.
Try out different model complexities (`n_degree`)
and training set sizes (`n_subset`)
to gain some intuition of what is happening.
```
n_subset = 100 # Subset of data to train on
n_degree = 20 # Degree of polynomials
train(poly_features[1:n_subset, 0:n_degree],
poly_features[n_train:, 0:n_degree], labels[1:n_subset],
labels[n_train:])
```
In later chapters, we will continue
to discuss overfitting problems
and methods for dealing with them,
such as weight decay and dropout.
## Summary
* Since the generalization error rate cannot be estimated based on the training error rate, simply minimizing the training error rate will not necessarily mean a reduction in the generalization error rate. Machine learning models need to safeguard against overfitting so as to minimize the generalization error.
* A validation set can be used for model selection (provided that it isn't used too liberally).
* Underfitting means that the model is not able to reduce the training error rate, while overfitting results from the training error rate being much lower than the error rate on the test data set.
* We should choose an appropriately complex model and avoid using insufficient training samples.
## Exercises
1. Can you solve the polynomial regression problem exactly? Hint - use linear algebra.
1. Model selection for polynomials
* Plot the training error vs. model complexity (degree of the polynomial). What do you observe?
* Plot the test error in this case.
* Generate the same graph as a function of the amount of data?
1. What happens if you drop the normalization of the polynomial features $x^i$ by $1/i!$. Can you fix this in some other way?
1. What degree of polynomial do you need to reduce the training error to 0?
1. Can you ever expect to see 0 generalization error?
## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2341)

```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
df = pd.read_csv('HR-dataset.csv')
np.random.seed(42)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
target = 'left'
features = df.columns.drop(target)
features = features.drop('empid') # Drop the employee id, since it is not a representative feature
print(features)
X, y = df[features].copy(), df[target]
salary_ordinals = {'low': 1, 'medium': 2, 'high': 3}
X['dept'] = X['dept'].apply(X['dept'].value_counts().get)
X['salary'] = X['salary'].apply(salary_ordinals.get)
scaler = StandardScaler()
X = pd.DataFrame(data=scaler.fit_transform(X), columns=X.columns)
def estimate_accuracy(clf, X, y, cv=5):
return cross_val_score(clf, X, y, cv=5, scoring='accuracy').mean()
tree = DecisionTreeClassifier(max_depth=30)
print("Decision tree:", estimate_accuracy(tree, X, y))
bagging_trees = BaggingClassifier(tree)
print("Decision tree bagging:", estimate_accuracy(bagging_trees, X, y))
random_tree = DecisionTreeClassifier(max_features=int(np.sqrt(len(features))), max_depth=30)
print("Random tree:", estimate_accuracy(random_tree, X, y))
bagging_random_trees = BaggingClassifier(random_tree)
print("Random tree bagging:", estimate_accuracy(bagging_random_trees, X, y))
random_forest = RandomForestClassifier(
n_estimators=100,
n_jobs=-1,
max_features=int(np.sqrt(len(features))),
max_depth=30)
print("Random Forest:", estimate_accuracy(random_forest, X, y))
random_forest = RandomForestClassifier(
n_estimators=100,
max_features=int(np.sqrt(len(features))),
max_depth=30,
oob_score=True,
n_jobs=-1
)
random_forest.fit(X, y)
random_forest.oob_score_.mean()
lr = LogisticRegression(solver='saga', max_iter=200)
lr.fit(X, y)
print("LR:", estimate_accuracy(lr, X, y))
random_logreg = BaggingClassifier(
lr,
n_estimators=10,
n_jobs=-1,
random_state=42
)
print("Bagging for LR:", estimate_accuracy(random_logreg, X, y))
random_logreg = BaggingClassifier(
lr,
n_estimators=10,
n_jobs=-1,
max_features=0.5,
random_state=42
)
print("Bagging for LR:", estimate_accuracy(random_logreg, X, y))
def plot_predictions(X, y, clf, proba=False, points_size=7, xlabel='x', ylabel='y'):
"""Fits the classifier on the data (X, y) and plots the result on a 2-D plane."""
def get_grid(data):
x_std, y_std = data.std(axis=0)
x_min, x_max = data[:, 0].min() - x_std / 2, data[:, 0].max() + x_std / 2
y_min, y_max = data[:, 1].min() - y_std / 2, data[:, 1].max() + y_std / 2
return np.meshgrid(np.linspace(x_min, x_max, num=200),
np.linspace(y_min, y_max, num=200))
clf.fit(X, y)
xx, yy = get_grid(X)
if proba:
predicted = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1].reshape(xx.shape)
else:
predicted = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.figure(figsize=(10.0, 10.0))
plt.pcolormesh(xx, yy, predicted, cmap=plt.cm.coolwarm, alpha=0.1)
plt.scatter(X[:, 0], X[:, 1], c=y, s=points_size, cmap=plt.cm.coolwarm, alpha=0.90)
plt.ylim([yy.min(),yy.max()])
plt.xlim([xx.min(),xx.max()])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return clf
```
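The `plot_predictions` helper above expects a two-column feature matrix. A hedged usage sketch (not in the original notebook) on a synthetic 2-D dataset such as `make_moons` might look like this:
```
from sklearn.datasets import make_moons
from sklearn.ensemble import RandomForestClassifier

# Toy 2-D dataset so the decision surface can be drawn on a plane
X_toy, y_toy = make_moons(n_samples=500, noise=0.3, random_state=42)
plot_predictions(X_toy, y_toy,
                 RandomForestClassifier(n_estimators=100, random_state=42),
                 proba=True, xlabel='x1', ylabel='x2')
plt.show()
```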
## Mounting Google Drive & loading training & test images
```
from google.colab import drive
drive.mount('/content/gdrive')
!ln -s /content/gdrive/My\ Drive/ /mydrive
!ls /mydrive
!mkdir face_emotion
!pwd
!cp /mydrive/face_emotion/data.zip face_emotion/
!unzip face_emotion/data.zip
!ls
!pip install livelossplot
```
## Importing necessary libraries
```
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
%matplotlib inline
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
from IPython.display import SVG, Image
from livelossplot import PlotLossesKerasTF
import tensorflow as tf
print("Tensorflow version:", tf.__version__)
print("Training set details: ")
for expression in os.listdir("train/"):
print(str(len(os.listdir("train/" + expression))) + " " + expression + " images")
print("Test set details:")
for expression in os.listdir("test/"):
print(str(len(os.listdir("test/" + expression))) + " " + expression + " images")
```
## Setting up the data generators
```
img_size = 48
batch_size = 64
datagen_train = ImageDataGenerator(horizontal_flip=True)
train_generator = datagen_train.flow_from_directory("train/",
target_size=(img_size,img_size),
color_mode="grayscale",
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
datagen_validation = ImageDataGenerator(horizontal_flip=True)
validation_generator = datagen_validation.flow_from_directory("test/",
target_size=(img_size,img_size),
color_mode="grayscale",
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
!ls
```
## Model Architecture
```
# Initialising the CNN
model = Sequential()
# 1 - Convolution
model.add(Conv2D(64,(3,3), padding='same', input_shape=(48, 48,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(128,(5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 3rd Convolution layer
model.add(Conv2D(512,(3,3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 4th Convolution layer
model.add(Conv2D(512,(3,3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer 1st layer
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
# Fully connected layer 2nd layer
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(7, activation='softmax'))
opt = Adam(lr=0.005)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
```
## Training the model
```
%%time
epochs = 30
steps_per_epoch = train_generator.n//train_generator.batch_size
validation_steps = validation_generator.n//validation_generator.batch_size
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=2, min_lr=0.00001, mode='auto')
checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_accuracy',
save_weights_only=True, mode='max', verbose=1)
callbacks = [PlotLossesKerasTF(), checkpoint, reduce_lr]
history = model.fit(
x=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data = validation_generator,
validation_steps = validation_steps,
callbacks=callbacks
)
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
!cp model_weights.h5 /mydrive/face_emotion/
```
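To reuse the trained network later, a possible sketch (not part of the original notebook) for reloading the architecture from `model.json` and the weights from `model_weights.h5`:
```
from tensorflow.keras.models import model_from_json

# Rebuild the architecture from the saved JSON and restore the trained weights
with open("model.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("model_weights.h5")
# Compilation settings here are only needed for evaluation/prediction
loaded_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Evaluate on the validation generator defined earlier in the notebook
print(loaded_model.evaluate(validation_generator, steps=validation_steps))
```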
## Libraries
```
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import pandas as pd
import csv
import unicodecsv
import os
Path = os.getcwd()
```
## .csv files
```
primary = (Path + "/primary.csv")
second = (Path + "/second.csv")
third = (Path + "/third.csv")
fourth = (Path + "/fourth.csv")
fifth = (Path + "/fifth.csv")
```
## Reading the .csv files
```
primary_ = []
with open(primary, encoding='utf-8') as file:
new = csv.reader(file)
for row in new:
primary_.append(row)
prim = pd.DataFrame(primary_)
prim_2 = pd.DataFrame(primary_)
prim_2 = prim_2.rename(columns={0: 'prefName', 1: "locName", 2: 'Label'})
#prim_2[1:60]
second_ = []
with open(second, encoding='utf-8') as file:
new = csv.reader(file)
for row in new:
second_.append(row)
sec_ = pd.DataFrame(second_)
sec_ = sec_.replace([None], [''])
#sec
third_ = []
with open(third, encoding='utf-8') as file:
new = csv.reader(file)
for row in new:
third_.append(row)
thd_ = pd.DataFrame(third_)
thd_ = thd_.replace([None], [''])
#thd
fourth_ = []
with open(fourth, encoding='utf-8') as file:
new = csv.reader(file)
for row in new:
fourth_.append(row)
frt_ = pd.DataFrame(fourth_)
frt_ = frt_.replace([None], [''])
#eT
fifth_ = []
with open(fifth, encoding='utf-8') as file:
new = csv.reader(file)
for row in new:
fifth_.append(row)
fft_ = pd.DataFrame(fifth_)
fft_ = fft_.replace([None], [''])
#fft
```
# MATCHES
## Matching (primary-sec)
```
mat1 = []
mat2 = []
list1 = prim[1].tolist()
list2 = sec_[0].tolist()
threshold = 100
for i in list1:
mat1.append(process.extract(i, list2, limit=2))
prim['matches'] = mat1
p = []
for j in prim['matches']:
for k in j:
if k[1] >= threshold:
p.append(k[0])
mat2.append(",".join(p))
p = []
prim['matches'] = mat2
match = prim[prim.matches != ""]
match
sec_2 = sec_[sec_[0].isin(match["matches"])]
sec_2 = sec_2.rename(columns={0: 'locName'})
del sec_2[1]
sec_2
```
## Matching (primary-thd)
```
mat1 = []
mat2 = []
list1 = prim[1].tolist()
list2 = thd_[0].tolist()
threshold = 100
for i in list1:
mat1.append(process.extract(i, list2, limit=2))
prim['matches'] = mat1
p = []
for j in prim['matches']:
for k in j:
if k[1] >= threshold:
p.append(k[0])
mat2.append(",".join(p))
p = []
prim['matches'] = mat2
match = prim[prim.matches != ""]
match
thd_2 = thd_[thd_[0].isin(match["matches"])]
thd_2 = thd_2.rename(columns={0: 'locName'})
del thd_2[1]
thd_2
```
## Matching (primary-frt)
```
mat1 = []
mat2 = []
list1 = prim[1].tolist()
list2 = frt_[0].tolist()
threshold = 100
for i in list1:
mat1.append(process.extract(i, list2, limit=2))
prim['matches'] = mat1
p = []
for j in prim['matches']:
for k in j:
if k[1] >= threshold:
p.append(k[0])
mat2.append(",".join(p))
p = []
prim['matches'] = mat2
match = prim[prim.matches != ""]
match
frt_2 = frt_[frt_[0].isin(match["matches"])]
frt_2 = frt_2.rename(columns={0: 'locName'})
del frt_2[1]
frt_2
```
## Matching (primary-fft)
```
mat1 = []
mat2 = []
list1 = prim[1].tolist()
list2 = fft_[0].tolist()
threshold = 100
for i in list1:
mat1.append(process.extract(i, list2, limit=2))
prim['matches'] = mat1
p = []
for j in prim['matches']:
for k in j:
if k[1] >= threshold:
p.append(k[0])
mat2.append(",".join(p))
p = []
prim['matches'] = mat2
match = prim[prim.matches != ""]
match
fft_2 = fft_[fft_[0].isin(match["matches"])]
fft_2 = fft_2.rename(columns={0: 'locName'})
del fft_2[1]
#fft_2
```
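Since the same matching logic is repeated for every secondary file, one possible refactoring (a sketch, not in the original notebook; `match_frame` is a name introduced only here) is to wrap it in a helper:
```
def match_frame(prim_df, other_df, threshold=100, limit=2):
    """Return the rows of other_df whose first column matches (score >= threshold)
    some value in the second column of prim_df, renamed to 'locName'."""
    candidates = other_df[0].tolist()
    matched = set()
    for value in prim_df[1].tolist():
        for name, score in process.extract(value, candidates, limit=limit):
            if score >= threshold:
                matched.add(name)
    result = other_df[other_df[0].isin(matched)].rename(columns={0: 'locName'})
    return result.drop(columns=[1])

# e.g. sec_2 = match_frame(prim, sec_); thd_2 = match_frame(prim, thd_); ...
```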
## Merging dataframes
```
prim_2 = pd.DataFrame(prim_2, columns=["prefName", "locName", "Label"])
df = prim_2.merge(sec_2, how = 'left', left_on=["locName"], right_on=["locName"])
#df[30:80]
df_2 = df.merge(thd_2, how = 'left', left_on=["locName"], right_on=["locName"])
#df_2 = df_2.replace([None], [''])
#df_2[1:60]
df_3 = df_2.merge(frt_2, how = 'left', left_on=["locName"], right_on=["locName"])
#df_3 = df_3.replace([None], [''])
#df_3[1:60]
df_4 = df_3.merge(fft_2, how = 'left', left_on=["locName"], right_on=["locName"])
#df_4[1:60]
```
## Justify (move row content to the left)
```
def justify(a, invalid_val=0, axis=1, side='left'):
if invalid_val is np.nan:
mask = pd.notnull(a)
else:
mask = a!=invalid_val
justified_mask = np.sort(mask,axis=axis)
if (side=='up') | (side=='left'):
justified_mask = np.flip(justified_mask,axis=axis)
out = np.full(a.shape, invalid_val, dtype=object)
if axis==1:
out[justified_mask] = a[mask]
else:
out.T[justified_mask.T] = a.T[mask.T]
return out
import numpy as np
arr = justify(df_4.to_numpy(), invalid_val=np.nan)
df_4 = pd.DataFrame(arr).dropna(axis=1, how='all')
#df_2 = df_2.replace([None], [''])
df_4 = df_4.rename(columns={0: 'prefName', 1: "locName",2: "Label"})
del df_4["locName"]
df_4[1:60]
```
## Create a final .csv file and export it!
```
df_4.to_csv(Path + "/final_file.csv", index=False, encoding='utf-8-sig')
```
# Exploratory Analysis
Group: Biblion'Data <br/>
Members:
- Luis Fiallos
- Eddy Calderón
- Erick Pulla
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pycountry
from itertools import combinations
import networkx as nx
%matplotlib inline
df = pd.read_csv('papersDataScience.csv')
```
<strong>World GDP data extracted from WorldBank.org</strong> <br />
Link: https://datos.bancomundial.org/indicador/NY.GDP.MKTP.CD
```
df_pib = pd.read_csv('PIB_per_Country.csv')
```
## Exploring the Datasets
### Papers dataset
```
# First 5 rows of the papers dataset
df.head()
# Information about the columns / features of the papers dataset (data types)
df.info()
# Columns of the papers dataset
df.columns
```
### GDP dataset
```
# First 5 rows of the GDP dataset
df_pib.head(5)
```
To simplify the dataset, we will only use the columns Country Name, Country Code, 2018, 2019 and 2020 (the year columns give the GDP for each year).
```
df_pib = df_pib[["Country Name", "Country Code", "2018", "2019", "2020"]]
df_pib.head(5)
# Columns of the GDP dataset
df_pib.info()
```
## Overview of the datasets
### Papers dataset
```
# Selected features
df[['doi','title','subtype','subtypeDescription','creator','affilname','affiliation_city','affiliation_country',
'author_count','author_names','coverDate','coverDisplayDate','publicationName','aggregationType','authkeywords','citedby_count',
'openaccess','year']].head()
(df[["title", "year"]].groupby("year").count() / len(df) * 100).plot.pie(autopct="%.2f%%", subplots=True)
plt.ylabel("Porcentaje de los papers por año")
```
54.2% of the records correspond to papers from 2020, while the remaining 45.8% correspond to 2019.
```
percentage_by_type = df[["title", "aggregationType"]].groupby("aggregationType").count() / len(df) * 100
percentage_by_type
```
As can be seen both in the table above and in the chart below, most of the papers (more than 70%) are of type <strong>Journal</strong>.
```
percentage_by_type.plot.pie(autopct="%.2f%%", subplots=True)
plt.ylabel("Porcentaje de los tipos de documentos")
df_countries = df.explode("affiliation_country", ignore_index=True)
df_countries[["affiliation_country"]]
percentage_by_country = df_countries[["title", "affiliation_country"]].groupby("affiliation_country").count() / len(df_countries) * 100
percentage_by_country.sort_values(by="title", inplace=True, ascending=False)
percentage_by_country
```
Most papers have authors located primarily in the United States, accounting for approximately 22.1% of the records.
### GDP dataset
```
df_pib.isna().sum()
```
In the world GDP dataset every country has its code (Country Code), while 2 entries are missing their name (Country Name). <br />
As for the GDP values, 12 are missing for 2018, 19 for 2019 and 43 for 2020.
```
df_pib[df_pib.isna().any(axis=1)]
df_pib=df_pib.dropna(subset=["Country Name","2018",'2019', '2020'])
df_pib
# Melt the GDP dataset so that the yearly GDP values end up in a single column
df_pib_melted = df_pib.melt(id_vars=["Country Name", "Country Code"], value_vars=["2018", "2019", "2020"], var_name="year", value_name="PIB")
df_pib_melted
countries = list(pycountry.countries)
country_codes = []
for country in countries:
country_codes.append(country.alpha_3)
country_codes[:10]
def drop_not_countries(x):
if x["Country Code"] in country_codes:
return x
df_pib_melted = df_pib_melted.transform(drop_not_countries, axis=1)
df_pib_melted = df_pib_melted.dropna(subset=["Country Name"])
df_pib_melted.groupby(["Country Code","year"]).head()
sns.barplot(data=df_pib_melted.sort_values(by="PIB", ascending=False).head(39), x="Country Code", y="PIB", hue="year")
plt.title("Top 13 países con mayor PIB entre el 2018, 2019 y 2020")
```
The USA has the highest GDP, exceeding the second-ranked country by more than 25% in 2018, 2019 and 2020.
## Outlier analysis
```
df.author_count.quantile(0.25)
df.author_count.quantile(0.75)
outliers_author_count=df[['title','author_count']].loc[df['author_count']>df.author_count.quantile(0.75)+1.5*(df.author_count.quantile(0.75)-df.author_count.quantile(0.25))]
outliers_author_count
porcentaje_out_author=outliers_author_count.title.count()/df.title.count()*100
porcentaje_out_author
```
3.4% of the records in the dataset are outliers for the author_count variable
```
df.citedby_count.quantile(0.25)
df.citedby_count.quantile(0.75)
outliers_citedby_count=df[['title','citedby_count']].loc[df['citedby_count']>df.citedby_count.quantile(0.75)+1.5*(df.citedby_count.quantile(0.75)-df.citedby_count.quantile(0.25))]
outliers_citedby_count
porcentaje_out_citedby=outliers_citedby_count.title.count()/df.title.count()*100
porcentaje_out_citedby
```
8.3% of the records in the dataset are outliers for the citedby_count variable
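Since the same IQR rule is applied to each numeric column, a small helper (a sketch added for illustration; `iqr_outliers` is a name introduced only here) could factor it out:
```
def iqr_outliers(frame, column, factor=1.5):
    """Return the rows of `frame` whose `column` exceeds Q3 + factor * IQR."""
    q1, q3 = frame[column].quantile(0.25), frame[column].quantile(0.75)
    return frame[frame[column] > q3 + factor * (q3 - q1)]

# e.g. share of outliers for citedby_count:
# len(iqr_outliers(df, 'citedby_count')) / len(df) * 100
```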
## Categorical variable analysis
### Question 1: How has the publication of papers varied from 2019 to 2020 according to the type of document published?
```
aggre_values=pd.unique(df['aggregationType'])
# Review our main categorical variables, which classify each paper by its type
cuenta_agrtype=df[['aggregationType','year','title']].groupby(['aggregationType','year']).count()
cuenta_agrtype
cuenta_type=df[['aggregationType','year','title']].groupby(['aggregationType','year']).count().reset_index()
sns.barplot(data=cuenta_type, x="aggregationType", y="title", hue="year")
plt.title("Producción de papers según tipo de documento")
plt.ylabel("Count")
```
Answer(s):
- Journal is the document type with the most paper instances, both in 2019 and in 2020.
- In terms of variation, every document type saw an increase in paper production except Book and Trade Journal.
- The document type most affected was Journal, with an increase of more than 30%.
```
porcentajes=[]
for tipo in aggre_values:
x=100*(cuenta_agrtype.loc[(tipo,2020),'title']-cuenta_agrtype.loc[(tipo,2019),'title'])/cuenta_agrtype.loc[(tipo,2019),'title']
porcentajes.append(x)
porcentajes
df_porcentajes=pd.DataFrame(porcentajes)
df_porcentajes['Tipo']=aggre_values
df_porcentajes
cuenta_subtype=df[['subtypeDescription','year','title']].groupby(['subtypeDescription','year']).count()
cuenta_subtype
```
### Question 2: Which countries have been most affected in their paper publication over the last year following the COVID-19 pandemic?
```
def split(x):
if pd.isnull(x):
return []
return str(x).split(";")
df["affiliation_country"] = df["affiliation_country"].apply(split)
df[["affiliation_country"]]
```
<h3>Obtaining the country of publication:</h3>
We store it in the "published_country" field
```
def get_published_country(x):
if len(x) != 0:
return x[0]
df["published_country"] = df["affiliation_country"].apply(get_published_country)
df[["published_country"]]
groupbycountry = df[["published_country", "year", "title"]].groupby(["published_country", "year"]).count().sort_values(by="title", ascending=False).reset_index()
groupbycountry.head(10)
# Which countries have been most affected in their paper publication over the last year following the COVID-19 pandemic?
groupbycountry = df[["published_country", "year", "title"]].groupby(["published_country", "year"]).count().sort_values(by="title", ascending=False).reset_index()
sns.barplot(data=groupbycountry.head(12), x="published_country", y="title", hue="year")
plt.title("Top 5 países con mayor número de publicaciones de papers")
plt.ylabel("Count")
```
Answer(s):
- The number of publications increased between 3% and 8% in countries such as the USA, UK, Iran, India and Germany.
- We can see a large increase in paper publications in China, the country that became the center of the pandemic.
### Question 3: How does the number of published papers vary across the months, and how does this change from 2019 to 2020? Is there any significant change?
```
df["month"] = pd.to_datetime(df["coverDate"]).dt.month_name()
df.head()
groupby_month = df[["month", "year", "title"]].groupby(["year", "month"]).count().sort_values(by="title", ascending=False).reset_index()
groupby_month
sns.barplot(data=groupby_month, x="title", y="month", hue="year")
plt.xlabel("count")
plt.title("Número de papers publicados por mes")
```
Answer(s): <br/>
- From 2019 to 2020 there is an increase in paper production of roughly 20% on average.
- Additionally, January is the month with the highest production in both years, accounting for more than 70% of the annual publications, while the remaining months keep a similar output of around 2000 publications per month.
### Using geopandas (work in progress)
```
import pycountry
def alpha3code(column):
CODE=[]
for country in column:
try:
code=pycountry.countries.get(name=country).alpha_3 # .alpha_3 means 3-letter country code
CODE.append(code)
except:
CODE.append('None')
return CODE
df_countries['CODE'] = alpha3code(df_countries["affiliation_country"])
df_countries[["affiliation_country", "CODE"]].head()
# Extracting the geometry points and latitude / longitude
import geopandas
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
world.columns=['pop_est', 'continent', 'name', 'CODE', 'gdp_md_est', 'geometry']
# Merge with our data to obtain the geometry of each country
merge = pd.merge(world, df_countries, on="CODE")
merge[['continent', 'name', 'CODE', 'gdp_md_est', 'geometry', 'affiliation_country']].head()
# Merge with an external dataset to obtain latitude and longitude
location=pd.read_csv('https://raw.githubusercontent.com/melanieshi0120/COVID-19_global_time_series_panel_data/master/data/countries_latitude_longitude.csv')
merge=merge.merge(location,on='name')
merge[['continent', 'name', 'CODE', 'gdp_md_est', 'geometry', 'latitude', 'longitude']].head()
# Count the papers per country (2019 and 2020 combined)
new_merge = merge[["CODE", 'title']].groupby("CODE").count().reset_index().merge(merge, on="CODE")
new_merge = new_merge[['continent', 'name', 'CODE', 'gdp_md_est', 'geometry', 'latitude', 'longitude', 'title_x']]
new_merge.drop_duplicates(subset=['CODE'])
new_merge = new_merge.sort_values(by="title_x", ascending=False).reset_index()
from geopandas import GeoDataFrame
new_merge = GeoDataFrame(new_merge)
new_merge.plot(column="title_x", scheme="quantiles",
figsize=(25, 20),
legend=True,cmap='coolwarm')
plt.title('Paper production by country during 2019 and 2020',fontsize=25)
for i in range(0,10):
plt.text(float(new_merge.longitude[i]),float(new_merge.latitude[i]),"{}\n{}".format(new_merge.name[i],new_merge.title_x[i]),size=10)
plt.show()
```
The color map shows the countries with the highest paper production during 2019 and 2020.
## Numerical variable analysis
```
# Preliminary review of the summary statistics of our numerical data
df.describe()
# Analyze how the author_count variable behaves with respect to document type and publication year
sns.boxplot(x='subtype',y='author_count',hue='year',data=df)
sns.boxplot(x='aggregationType',y='author_count',hue='year',data=df)
```
<strong>A large number of outliers can already be seen in this first exploratory look at the data.</strong>
```
# Analyze how the citedby_count variable behaves with respect to document type and publication year
sns.boxplot(x='subtype',y='citedby_count',hue='year',data=df)
sns.boxplot(x='aggregationType',y='citedby_count',hue='year',data=df)
```
<strong>This variable also shows a considerable number of outliers.</strong>
```
sns.displot(df['aggregationType'])
```
<strong>Journal is the document type with the largest number of instances.</strong>
```
g=sns.FacetGrid(df,col='year')
g.map(sns.histplot,'aggregationType')
```
<strong>Document types follow the same distribution in 2019 and 2020, although the counts are higher in 2020.</strong>
```
sns.displot(df['subtype'])
```
<strong>AR is the document subtype with the largest number of instances.</strong>
```
gr=sns.FacetGrid(df,col='year')
gr.map(sns.histplot,'subtype')
```
<strong>Document subtypes follow the same distribution in 2019 and 2020, although the counts are higher in 2020.</strong>
```
g=sns.FacetGrid(df,col='openaccess')
g.map(sns.histplot,'aggregationType')
gr=sns.FacetGrid(df,col='openaccess')
gr.map(sns.histplot,'subtype')
```
<strong>Given the large number of outliers in the numerical variables, we wanted to know which paper was the most cited in each year, obtaining the following result.</strong>
```
indices=df.groupby(['year'])['citedby_count'].transform(max)==df['citedby_count']
df[indices]
```
<strong>As can be seen, in 2020 the most cited paper was 'Clinical features of patients infected with 2019 novel coronavirus in Wuhan, China (thelancet.com)', a topic that became popular due to the COVID-19 pandemic and whose citation count greatly exceeds that of the previous year.</strong>
## Graphs
### Question 4: If we build a graph of the keywords that co-occur within publications in the dataset, what have been the main research focuses over the last few months?
### Graph analysis: Germany
```
keywords_GER = [] # Nodes for the Germany keywords
keywords_GER_edges = [] # Edges for Germany
for k in df.loc[df["published_country"]=="Germany", "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_GER_edges += list(combinations(k, 2))
keywords_GER_edges
keyword_graph_GER = nx.Graph()
keyword_graph_GER.add_edges_from(keywords_GER_edges)
# Clustering coefficient
cof_real_GER = nx.average_clustering(keyword_graph_GER)
print(f'Real: ', cof_real_GER)
list_tuples = nx.betweenness_centrality(keyword_graph_GER, normalized=True).items()
sort_tuples = sorted(list_tuples, key=lambda x: x[1], reverse=True)
for i in sort_tuples[:10]:
print('{0:4s}: {1:>}'.format(i[0], i[1]))
```
<strong>Based on the Germany graph, the top 5 keywords by betweenness centrality are Machine Learning, Data Science, Big Data, Artificial Intelligence and Open Science. The main data science research topics therefore revolve around machine learning, big data and artificial intelligence. Another interesting point in this ranking is the presence of Open Science, which means that a considerable share of the published scientific documents are freely accessible to the scientific community, a movement promoted within the European community.</strong>
### Graph analysis: United States
```
keywords_USA = [] # Nodes for the United States keywords
keywords_USA_edges = [] # Edges for the United States
for k in df.loc[df["published_country"]=="United States", "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_USA_edges += list(combinations(k, 2))
keywords_USA_edges
keyword_graph_USA = nx.Graph()
keyword_graph_USA.add_edges_from(keywords_USA_edges)
nx.write_gexf(keyword_graph_USA,"keywords_USA.gexf")
# Clustering coefficient
cof_real_USA = nx.average_clustering(keyword_graph_USA)
print(f'Real: ', cof_real_USA)
list_tuples_USA = nx.betweenness_centrality(keyword_graph_USA, normalized=True).items()
sort_tuples_USA = sorted(list_tuples_USA, key=lambda x: x[1], reverse=True)
```
<strong>Since the largest number of publications comes from the United States, the resulting graph is much more complex and the computation times much longer, so we left this analysis for the end.</strong>
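One way to keep that computation tractable (a hedged sketch, not part of the original analysis) is to approximate betweenness centrality by sampling source nodes via the `k` parameter of networkx, assuming the graph has at least a few hundred nodes:
```
# Approximate betweenness centrality by sampling 200 source nodes instead of all of them;
# larger k gives a better approximation at a higher computational cost.
approx_bc_USA = nx.betweenness_centrality(keyword_graph_USA, k=200, normalized=True, seed=42)
top10_USA = sorted(approx_bc_USA.items(), key=lambda x: x[1], reverse=True)[:10]
for keyword, score in top10_USA:
    print('{0:4s}: {1:>}'.format(keyword, score))
```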
### Graph analysis: India
```
keywords_IND = [] # Nodes for the India keywords
keywords_IND_edges = [] # Edges for India
for k in df.loc[df["published_country"]=="India", "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_IND_edges += list(combinations(k, 2))
keywords_IND_edges
keyword_graph_IND = nx.Graph()
keyword_graph_IND.add_edges_from(keywords_IND_edges)
nx.write_gexf(keyword_graph_IND,"keywords_IND.gexf")
# Clustering coefficient
cof_real_IND = nx.average_clustering(keyword_graph_IND)
print(f'Real: ', cof_real_IND)
list_tuples_IND = nx.betweenness_centrality(keyword_graph_IND, normalized=True).items()
sort_tuples_IND = sorted(list_tuples_IND, key=lambda x: x[1], reverse=True)
for i in sort_tuples_IND[:10]:
print('{0:4s}: {1:>}'.format(i[0], i[1]))
```
<strong>Looking at the betweenness centrality results for the India graph, certain keywords remain high in the ranking, such as Machine Learning, Data Science and Big Data. However, other research topics appear further down the ranking, such as Covid-19, which has become a topic of interest in recent months.</strong>
### Graph analysis: United Kingdom
```
keywords_UK = [] # Nodes for the United Kingdom keywords
keywords_UK_edges = [] # Edges for the United Kingdom
for k in df.loc[df["published_country"]=="United Kingdom", "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_UK_edges += list(combinations(k, 2))
keywords_UK_edges
keyword_graph_UK = nx.Graph()
keyword_graph_UK.add_edges_from(keywords_UK_edges)
# nx.write_gexf(keyword_graph_UK,"keywords_UK.gexf")
# Clustering coefficient
cof_real_UK = nx.average_clustering(keyword_graph_UK)
print(f'Real: ', cof_real_UK)
```
Betweenness centrality results for the United Kingdom <br>
systematic review: 0.14645333078394285<br>
citizen science: 0.0977697204817174<br>
machine learning: 0.08082675603468793<br>
data science: 0.07765026543608962<br>
big data: 0.04227129583279022<br>
meta-analysis: 0.03206767065810722<br>
qualitative research: 0.02692078007964253<br>
artificial intelligence: 0.025744322483237513<br>
covid-19: 0.02413534533225036<br>
epidemiology: 0.022744714680004274<br>
<strong>From the United Kingdom graph we can see that Machine Learning, Data Science and Big Data appear once again. However, these three topics rank below Systematic Review and Citizen Science, which are closely related to the health domain and consist of analysing previously conducted clinical studies.</strong>
### Graph Analysis: Iran
```
keywords_IRN = [] # Keyword nodes for Iran
keywords_IRN_edges = [] # Keyword co-occurrence edges for Iran
for k in df.loc[df["published_country"]=="Iran", "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_IRN_edges += list(combinations(k, 2))
keywords_IRN_edges
keyword_graph_IRN = nx.Graph()
keyword_graph_IRN.add_edges_from(keywords_IRN_edges)
nx.write_gexf(keyword_graph_IRN,"keywords_IRN.gexf")
# Clustering coefficient
cof_real_IRN = nx.average_clustering(keyword_graph_IRN)
print(f'Real: ', cof_real_IRN)
list_tuples_IRN = nx.betweenness_centrality(keyword_graph_IRN, normalized=True).items()
sort_tuples_IRN = sorted(list_tuples_IRN, key=lambda x: x[1], reverse=True)
for i in sort_tuples_IRN[:10]:
print('{0:4s}: {1:>}'.format(i[0], i[1]))
```
<strong>The results for Iran show that its main research topics differ considerably from the other countries analysed. Looking at the top 5, the main research themes are meta-analysis, systematic review, covid-19 and students. This suggests that a large share of data science research in Iran focuses on the health domain, unlike the other countries analysed, where research is devoted to topics closer to the core of data science.</strong>
### Graph Analysis: China
```
keywords_CHI = [] # Keyword nodes for China
keywords_CHI_edges = [] # Keyword co-occurrence edges for China
for k in df.loc[df["published_country"]=="China", "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_CHI_edges += list(combinations(k, 2))
keywords_CHI_edges
keyword_graph_CHI = nx.Graph()
keyword_graph_CHI.add_edges_from(keywords_CHI_edges)
nx.write_gexf(keyword_graph_CHI,"keywords_CHI.gexf")
# Clustering coefficient
cof_real_CHI = nx.average_clustering(keyword_graph_CHI)
print(f'Real: ', cof_real_CHI)
list_tuples_CHI = nx.betweenness_centrality(keyword_graph_CHI, normalized=True).items()
sort_tuples_CHI = sorted(list_tuples_CHI, key=lambda x: x[1], reverse=True)
for i in sort_tuples_CHI[:10]:
print('{0:4s}: {1:>}'.format(i[0], i[1]))
```
<strong>As in the previous countries, the main research topics centre on Machine Learning and Big Data. In addition, meta-analysis is a central theme in most of the articles, which implies that a large part of the Data Science articles from China focus on medicine and clinical studies.</strong>
## GDP (PIB) analysis
```
df_pib_melted[df_pib_melted["Country Name"]=="Alemania"]
pib_graph_countries = df_pib_melted.loc[((df_pib_melted["Country Code"]=="CHN")|(df_pib_melted["Country Code"]=="IRN")|(df_pib_melted["Country Code"]=="IND")|(df_pib_melted["Country Code"]=="GBR")|(df_pib_melted["Country Code"]=="DEU"))&(df_pib_melted["year"]!="2018")].sort_values(by="PIB", ascending=False)
sns.barplot(data=pib_graph_countries, x="Country Name", y="PIB", hue="year")
plt.title("PIB de los países analizados con grafos")
c = ["CHN", "IRN", "IND", "DEU", "GBR"]
c1 = [["CHN", cof_real_CHI], ["IRN", cof_real_IRN], ["IND", cof_real_IND], ["DEU", cof_real_GER], ["GBR", cof_real_UK]]
# pib_avg_coeff = np.array([c1, columns=["Country Code", "clustering_coefficient"])
pib_avg_coeff = pd.DataFrame(data=c1, columns=["Country Code", "clustering_coefficient"])
pib_avg_coeff
pib_graph_countries
pib_clust_coef = pd.merge(pib_graph_countries, pib_avg_coeff, on="Country Code")
pib_clust_coef
sns.barplot(data=pib_clust_coef, x="Country Name", y="PIB")
sns.barplot(data=pib_clust_coef, x="Country Name", y="clustering_coefficient")
```
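To complement the two bar plots above, a rank correlation between GDP and the average clustering coefficient can also be computed across the five countries; with so few data points this is only indicative.
```
# Spearman rank correlation between GDP (PIB) and the average clustering coefficient
pib_clust_coef[['PIB', 'clustering_coefficient']].corr(method='spearman')
```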
<strong>We can see that in the clustering coefficient plot there is no significant change between countries, whereas in the GDP plot there is clearly a large difference between countries for 2020. We can therefore conclude that the clustering coefficient is not affected by a country's economic situation as measured by GDP.</strong>
## Continuing with graphs
### Question 5: If we build a graph of co-occurring keywords for each group of publication years, has that graph changed, and are there changes related to the pandemic?
### Graph Analysis: China (2019 vs 2020)
<strong>2019</strong>
```
keywords_CHI_edges_2019 = [] # Keyword co-occurrence edges for China, 2019
for k in df.loc[(df["published_country"]=="China")&(df["year"]==2019), "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_CHI_edges_2019 += list(combinations(k, 2))
keywords_CHI_edges_2019
keyword_graph_CHI_2019 = nx.Graph()
keyword_graph_CHI_2019.add_edges_from(keywords_CHI_edges_2019)
nx.write_gexf(keyword_graph_CHI_2019,"keywords_CHI_2019.gexf")
# Clustering coefficient
cof_real_CHI_2019 = nx.average_clustering(keyword_graph_CHI_2019)
print(f'Real: ', cof_real_CHI_2019)
list_tuples_CHI_2019 = nx.betweenness_centrality(keyword_graph_CHI_2019, normalized=True).items()
sort_tuples_CHI_2019 = sorted(list_tuples_CHI_2019, key=lambda x: x[1], reverse=True)
for i in sort_tuples_CHI_2019[:10]:
print('{0:4s}: {1:>}'.format(i[0], i[1]))
```
<strong>2020</strong>
```
keywords_CHI_edges_2020 = [] # Keyword co-occurrence edges for China, 2020
for k in df.loc[(df["published_country"]=="China")&(df["year"]==2020), "authkeywords"].tolist():
if pd.isna(k):
continue
k = [x.strip().lower() for x in k.split("|")]
keywords_CHI_edges_2020 += list(combinations(k, 2))
keywords_CHI_edges_2020
keyword_graph_CHI_2020 = nx.Graph()
keyword_graph_CHI_2020.add_edges_from(keywords_CHI_edges_2020)
nx.write_gexf(keyword_graph_CHI_2020,"keywords_CHI_2020.gexf")
# Clustering coefficient
cof_real_CHI_2020 = nx.average_clustering(keyword_graph_CHI_2020)
print(f'Real: ', cof_real_CHI_2020)
list_tuples_CHI_2020 = nx.betweenness_centrality(keyword_graph_CHI_2020, normalized=True).items()
sort_tuples_CHI_2020 = sorted(list_tuples_CHI_2020, key=lambda x: x[1], reverse=True)
for i in sort_tuples_CHI_2020[:10]:
print('{0:4s}: {1:>}'.format(i[0], i[1]))
```
## 2019:
- meta-analysis: 0.20583374148374659
- systematic review: 0.051454683181564737
- china: 0.05093023429952948
- big data: 0.04796853379003524
- machine learning: 0.04398983257082266
- deep learning: 0.036255119884724615
- bibliometrics: 0.02378527313359049
- citespace: 0.022268057792981553
- artificial intelligence: 0.0218016059508896
- bibliometric analysis: 0.02118826378168405
## 2020:
- meta-analysis: 0.18801061050539705
- big data: 0.07317846602644962
- machine learning: 0.054879652902832886
- data mining: 0.053386064455062214
- deep learning: 0.05162811668833398
- china: 0.041678010791306765
- systematic review: 0.04041846173221223
- covid-19: 0.03717052167882441
- artificial intelligence: 0.02227624377588648
- bibliometric analysis: 0.01634688839435912
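The two rankings above can also be placed side by side programmatically, which makes the 2019-to-2020 shifts easier to scan; the helper names below are for illustration only.
```
# Top-10 betweenness keywords for China, 2019 vs 2020, side by side
top10_2019 = [kw for kw, _ in sort_tuples_CHI_2019[:10]]
top10_2020 = [kw for kw, _ in sort_tuples_CHI_2020[:10]]
pd.DataFrame({'2019': top10_2019, '2020': top10_2020})
```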
<strong>Between 2019 and 2020 the main research theme remains meta-analysis, which stays in first place in both years. As discussed before, it is a topic closely related to the health and social domains, so most research in China appears to focus on that area. However, further down the ranking we can see how the bibliometrics theme is displaced from the research focus by Epidemiology and Covid-19. This slight shift can be interpreted as a consequence of the beginning of the pandemic, which is why research starts to revolve around Covid-19.</strong>
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
## Introduction
```
from IPython.display import YouTubeVideo
YouTubeVideo(id="-oimHbVDdDA", width=560, height=315)
```
Because of the relational structure in a graph,
we can begin to think about "importance" of a node
that is induced because of its relationships
to the rest of the nodes in the graph.
Before we go on, let's think about
a pertinent and contemporary example.
### An example: contact tracing
At the time of writing (April 2020),
finding important nodes in a graph has actually taken on a measure of importance
that we might not have appreciated before.
With the COVID-19 virus spreading,
contact tracing has become quite important.
In an infectious disease contact network,
where individuals are nodes and
contact between individuals of some kind are the edges,
an "important" node in this contact network
would be an individual who was infected
who also was in contact with many people
during the time that they were infected.
### Our dataset: "Sociopatterns"
The dataset that we will use in this chapter is the "[sociopatterns network][sociopatterns]" dataset.
Incidentally, it's also about infectious diseases.
[sociopatterns]: http://konect.uni-koblenz.de/networks/sociopatterns-infectious
Here is the description of the dataset.
> This network describes the face-to-face behavior of people
> during the exhibition INFECTIOUS: STAY AWAY in 2009
> at the Science Gallery in Dublin.
> Nodes represent exhibition visitors;
> edges represent face-to-face contacts that were active for at least 20 seconds.
> Multiple edges between two nodes are possible and denote multiple contacts.
> The network contains the data from the day with the most interactions.
To simplify the network, we have represented only the last contact between individuals.
```
from nams import load_data as cf
G = cf.load_sociopatterns_network()
```
It is loaded as an undirected graph object:
```
type(G)
```
As usual, before proceeding with any analysis,
we should know basic graph statistics.
```
len(G.nodes()), len(G.edges())
```
## A Measure of Importance: "Number of Neighbors"
One measure of importance of a node is
the number of **neighbors** that the node has.
What is a **neighbor**?
We will work with the following definition:
> The neighbor of a node is connected to that node by an edge.
Let's explore this concept, using the NetworkX API.
Every NetworkX graph provides a `G.neighbors(node)` class method,
which lets us query a graph for the number of neighbors
of a given node:
```
G.neighbors(7)
```
It returns a generator that doesn't immediately return
the exact neighbors list.
This means we cannot know its exact length,
as it is a generator.
If you tried to do:
```python
len(G.neighbors(7))
```
you would get the following error:
```python
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-72c56971d077> in <module>
----> 1 len(G.neighbors(7))
TypeError: object of type 'dict_keyiterator' has no len()
```
Hence, we will need to cast it as a list in order to know
both its length
and its members:
```
list(G.neighbors(7))
```
In the event that some nodes have an extensive list of neighbors,
then using the `dict_keyiterator` is potentially a good memory-saving technique,
as it lazily yields the neighbors.
### Exercise: Rank-ordering the number of neighbors a node has
Since we know how to get the list of nodes that are neighbors of a given node,
try the following exercise:
> Can you create a ranked list of the importance of each individual, based on the number of neighbors they have?
Here are a few hints to help:
- You could consider using a `pandas Series`. This would be a modern and idiomatic way of approaching the problem.
- You could also consider using Python's `sorted` function.
```
from nams.solutions.hubs import rank_ordered_neighbors
#### REPLACE THE NEXT FEW LINES WITH YOUR ANSWER
# answer = rank_ordered_neighbors(G)
# answer
```
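Here is a minimal sketch of one way this could be done with a pandas Series; the function name is just for the sketch and it is not necessarily identical to the packaged solutions imported above.

```python
import pandas as pd

def rank_ordered_neighbors_sketch(G):
    # Count each node's neighbors and sort descending,
    # so the most-connected individuals come first.
    counts = {node: len(list(G.neighbors(node))) for node in G.nodes()}
    return pd.Series(counts).sort_values(ascending=False)

# rank_ordered_neighbors_sketch(G).head()
```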
The original implementation looked like the following
```
from nams.solutions.hubs import rank_ordered_neighbors_original
# rank_ordered_neighbors_original??
```
And another implementation that uses generators:
```
from nams.solutions.hubs import rank_ordered_neighbors_generator
# rank_ordered_neighbors_generator??
```
## Generalizing "neighbors" to arbitrarily-sized graphs
The concept of neighbors is simple and appealing,
but it leaves us with a slight point of dissatisfaction:
it is difficult to compare graphs of different sizes.
Is a node more important solely because it has more neighbors?
What if it were situated in an extremely large graph?
Would we not expect it to have more neighbors?
As such, we need a normalization factor.
One reasonable one, in fact, is
_the number of nodes that a given node could **possibly** be connected to._
By taking the ratio of the number of neighbors a node has
to the number of neighbors it could possibly have,
we get the **degree centrality** metric.
Formally defined, the degree centrality of a node (let's call it $d$)
is the number of neighbors that a node has (let's call it $n$)
divided by the number of neighbors it could _possibly_ have (let's call it $N$):
$$d = \frac{n}{N}$$
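As a quick sanity check, the same quantity can be computed by hand; in the sketch below (variable name arbitrary), $N$ is taken to be the number of *other* nodes in the graph, i.e. $|V| - 1$:

```python
# Manual degree centrality: neighbors divided by possible neighbors (|V| - 1).
manual_dc = {node: len(list(G.neighbors(node))) / (len(G) - 1) for node in G.nodes()}
# This should agree with the nx.degree_centrality(G) values computed below.
```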
NetworkX provides a function for us to calculate degree centrality conveniently:
```
import networkx as nx
import pandas as pd
dcs = pd.Series(nx.degree_centrality(G))
dcs
```
`nx.degree_centrality(G)` returns to us a dictionary of key-value pairs,
where the keys are node IDs
and values are the degree centrality score.
To save on output length, I took the liberty of casting it as a pandas Series
to make it easier to display.
Incidentally, we can also sort the series
to find the nodes with the highest degree centralities:
```
dcs.sort_values(ascending=False)
```
Does the list order look familiar?
It should, since the numerator of the degree centrality metric
is identical to the number of neighbors,
and the denominator is a constant.
## Distribution of graph metrics
One important concept that you should come to know
is that the distribution of node-centric values
can characterize classes of graphs.
What do we mean by "distribution of node-centric values"?
One would be the degree distribution,
that is, the collection of node degree values in a graph.
Generally, you might be familiar with plotting a histogram
to visualize distributions of values,
but in this book, we are going to avoid histograms like the plague.
I detail a lot of reasons in a [blog post][ecdf] I wrote in 2018,
but the main points are that:
1. It's easier to lie with histograms.
1. You get informative statistical information (median, IQR, extremes/outliers)
more easily.
[ecdf]: https://ericmjl.github.io/blog/2018/7/14/ecdfs/
### Exercise: Degree distribution
In this next exercise, we are going to get practice visualizing these values
using empirical cumulative distribution function plots.
I have written for you an ECDF function that you can use already.
Its API looks like the following:
```python
x, y = ecdf(list_of_values)
```
giving you `x` and `y` values that you can directly plot.
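If you are curious, an ECDF function of this kind can be written in a few lines; this is a sketch of one possible implementation, not necessarily the one shipped in `nams.functions`:

```python
import numpy as np

def ecdf_sketch(data):
    # Sort the values and pair each with its cumulative fraction.
    x = np.sort(data)
    y = np.arange(1, len(x) + 1) / len(x)
    return x, y
```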
The exercise prompt is this:
> Plot the ECDF of the degree centrality and degree distributions.
First do it for **degree centrality**:
```
from nams.functions import ecdf
from nams.solutions.hubs import ecdf_degree_centrality
#### REPLACE THE FUNCTION CALL WITH YOUR ANSWER
ecdf_degree_centrality(G)
```
Now do it for **degree**:
```
from nams.solutions.hubs import ecdf_degree
#### REPLACE THE FUNCTION CALL WITH YOUR ANSWER
ecdf_degree(G)
```
The fact that they are identically-shaped
should not surprise you!
### Exercise: What about that denominator?
The denominator $N$ in the degree centrality definition
is "the number of nodes that a node could _possibly_ be connected to".
Can you think of two ways $N$ could be defined?
```
from nams.solutions.hubs import num_possible_neighbors
#### UNCOMMENT TO SEE MY ANSWER
# print(num_possible_neighbors())
```
### Exercise: Circos Plotting
Let's get some practice with the `nxviz` API.
> Visualize the graph `G`, while ordering and colouring them by the 'order' node attribute.
```
from nams.solutions.hubs import circos_plot
#### REPLACE THE NEXT LINE WITH YOUR ANSWER
circos_plot(G)
```
### Exercise: Visual insights
Since we know that node colour and order
are by the "order" in which the person entered into the exhibit,
what does this visualization tell you?
```
from nams.solutions.hubs import visual_insights
#### UNCOMMENT THE NEXT LINE TO SEE MY ANSWER
# print(visual_insights())
```
### Exercise: Investigating degree centrality and node order
One of the insights that we might have gleaned from visualizing the graph
is that the nodes that have a high degree centrality
might also be responsible for the edges that criss-cross the Circos plot.
To test this, plot the following:
- x-axis: node degree centrality
- y-axis: maximum difference between the neighbors' `order`s (a node attribute) and the node's `order`.
```
from nams.solutions.hubs import dc_node_order
dc_node_order(G)
```
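To put a number on that visual impression, one could compute a rank correlation between the two quantities. The sketch below assumes each node carries a numeric `order` attribute (as used in the Circos plot above); the variable names are for illustration only.

```python
import pandas as pd

# Maximum absolute difference between a node's "order" and its neighbors' "order".
max_order_diff = {
    n: max((abs(G.nodes[nbr]["order"] - G.nodes[n]["order"]) for nbr in G.neighbors(n)), default=0)
    for n in G.nodes()
}
comparison = pd.DataFrame({"degree_centrality": dcs, "max_order_diff": pd.Series(max_order_diff)})
comparison.corr(method="spearman")
```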
The somewhat positive correlation between degree centrality and the maximum order difference suggests that this trend holds true.
A further applied question would be to ask what behaviour of these nodes would give rise to this pattern.
Are these nodes actually exhibit staff?
Or is there some other reason why they are staying so long?
This, of course, would require joining in further information
that we would overlay on top of the graph
(by adding them as node or edge attributes)
before we might make further statements.
## Reflections
In this chapter, we defined a metric of node importance: the degree centrality metric.
In the example we looked at, it could help us identify
potential infectious agent superspreaders in a disease contact network.
In other settings, it might help us spot:
- message amplifiers/influencers in a social network, and
- potentially crowded airports that have lots of connections into and out of it (still relevant to infectious disease spread!)
- and many more!
What other settings can you think of in which the number of neighbors that a node has can become
a metric of importance for the node?
## Solutions
Here are the solutions to the exercises above.
```
from nams.solutions import hubs
import inspect
print(inspect.getsource(hubs))
```
# Example notebook for using `cpymad` and `MAD-X` for beam line design
### <span style="color: red;">Note: this notebook likely runs "read-only", any changes might be lost!</span>
#### If running on mybinder.org $\implies$ Hit `File` $\rightarrow$ `Download as` $\rightarrow$ `Notebook` !
#### If inside your docker container $\implies$ Hit `File` $\rightarrow$ `Save Notebook As` and store it under `home/some_filename.ipynb` !
In the following you will find a demonstration of how to design a FODO beam line with `MAD-X`, using the `cpymad` library as the Python API.
You may run this Jupyter notebook through the Binder service at https://mybinder.org/ .
Enjoy to the max! :-)
Nov 2021, Adrian Oeftiger
## Basic imports
```
import numpy as np
from cpymad.madx import Madx
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set_context('talk', font_scale=1.2, rc={'lines.linewidth': 3})
sns.set_style('ticks',
{'grid.linestyle': 'none', 'axes.edgecolor': '0',
'axes.linewidth': 1.2, 'legend.frameon': True,
'xtick.direction': 'out', 'ytick.direction': 'out',
'xtick.top': True, 'ytick.right': True,
})
```
## Let's get going with `MAD-X`, launching an instance
```
madx = Madx(stdout=True)
```
## Some basic parameters for our FODO cell
```
circumference = 2 # [m]
harmonic = 2
Etot = 1 # [GeV]
Qx = 0.27
Qy = 0.23
```
## Define the FODO cell and the beam description in `MAD-X`
```
madx.input('''
kqd := -3.3 * 0.1;
kqf := 3.3 * 0.1;
v := 0;
qd: quadrupole, l = 0.05, k1 := kqd / 0.1;
qf: quadrupole, l = 0.1, k1 := kqf / 0.1;
rf: rfcavity, volt := v, harmon = {1}, lag = 0;
fodo: sequence, l = {0};
qd, at = 0.025;
rf, at = {0} / 4.;
qf, at = {0} / 2.;
rf, at = {0} * 3 / 4.;
qd, at = {0} - 0.025;
endsequence;
'''.format(circumference, harmonic))
madx.command.beam(particle='proton', energy=str(Etot))
madx.use(sequence='fodo')
```
## Changing the quadrupole focusing strength to match the set tunes
```
madx.input(
'''match, sequence=fodo;
global, sequence=fodo, q1={Qx}, q2={Qy};
vary, name = kqd, step=0.0001;
vary, name = kqf, step=0.0001;
lmdif, tolerance=1e-20;
endmatch;
'''.format(Qx=Qx, Qy=Qy))
```
## Let's check with the `twiss` command for the periodic solution
```
twiss = madx.twiss()
```
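As a quick sanity check, the matched quadrupole strengths and the resulting tunes can be read back through the cpymad API. The sketch below assumes cpymad's usual `globals` and twiss-summary accessors:
```
# Matched integrated strengths (the deferred variables varied during matching)
print('kqd =', madx.globals['kqd'], ' kqf =', madx.globals['kqf'])
# Tunes from the twiss summary -- these should reproduce the Qx and Qy set above
print('Q1 =', twiss.summary['q1'], ' Q2 =', twiss.summary['q2'])
```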
## Plotting the $\beta_{x,y}$ functions
```
plt.figure(figsize=(8, 5))
plt.plot(twiss['s'], twiss['betx'], label=r'$\beta_x$', color='black')
plt.plot(twiss['s'], twiss['bety'], label=r'$\beta_y$', color='red')
plt.xlabel('Path length $s$ [m]')
plt.ylabel(r'Beta function $\beta_{x,y}$ [m.rad]')
plt.legend();
```
## With a thin lattice and some markers we can improve the plotting resolution
```
assert madx.command.select(
flag='MAKETHIN',
class_='qd',
slice_=8,
)
assert madx.command.select(
flag='MAKETHIN',
class_='qf',
slice_=16,
)
madx.command.makethin(
makedipedge=False,
style='simple', # more accurate alternative: 'teapot'
sequence='fodo',
)
madx.command.seqedit(sequence='fodo')
for s in np.linspace(start=0, stop=circumference, num=100, endpoint=False):
madx.command.install(element='Marker', at=s)
madx.command.flatten()
madx.command.endedit()
madx.use(sequence='fodo')
twiss = madx.twiss()
plt.figure(figsize=(8, 5))
plt.plot(twiss['s'], twiss['betx'], label=r'$\beta_x$', color='black')
plt.plot(twiss['s'], twiss['bety'], label=r'$\beta_y$', color='red')
plt.xlabel('Path length $s$ [m]')
plt.ylabel(r'Beta function $\beta_{x,y}$ [m.rad]')
plt.legend();
```
## $\implies$ Beautiful... now go ahead and have fun designing your own accelerator!
# EIS metadata validation script
Used to validate Planon output with spreadsheet input
## 1. Data import
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Read data. There are two datasets: Planon and Master. The latter is the EIS data nomenclature that was created. Master is made up of two subsets: loggers and meters. Loggers are sometimes called controllers and meters are sometimes called sensors. In rare cases meters or sensors are also called channels.
```
planon=pd.read_excel('EIS Assets v2.xlsx',index_col = 'Code')
#master_loggerscontrollers_old = pd.read_csv('LoggersControllers.csv', index_col = 'Asset Code')
#master_meterssensors_old = pd.read_csv('MetersSensors.csv', encoding = 'macroman', index_col = 'Asset Code')
master='MASTER PlanonLoggersAndMeters 17 10 16.xlsx'
master_loggerscontrollers=pd.read_excel(master,sheetname='Loggers Controllers', index_col = 'Asset Code')
master_meterssensors=pd.read_excel(master,sheetname='Meters Sensors', encoding = 'macroman', index_col = 'Asset Code')
planon['Code']=planon.index
master_loggerscontrollers['Code']=master_loggerscontrollers.index
master_meterssensors['Code']=master_meterssensors.index
set(master_meterssensors['Classification Group'])
set(master_loggerscontrollers['Classification Group'])
new_index=[]
for i in master_meterssensors.index:
if '/' not in i:
new_index.append(i[:i.find('-')+1]+i[i.find('-')+1:].replace('-','/'))
else:
new_index.append(i)
master_meterssensors.index=new_index
master_meterssensors['Code']=master_meterssensors.index
new_index=[]
for i in master_meterssensors.index:
logger=i[:i.find('/')]
if master_loggerscontrollers.loc[logger]['Classification Group']=='BMS controller':
meter=i[i.find('/')+1:]
if meter[0] not in {'N','n','o','i'}:
new_index.append(i)
else:
new_index.append(i)
len(master_meterssensors)
master_meterssensors=master_meterssensors.loc[new_index]
len(master_meterssensors)
master_meterssensors.to_csv('meterssensors.csv')
master_loggerscontrollers.to_csv('loggerscontrollers.csv')
```
Unify the index: cast entries to strings and strip trailing spaces.
```
planon.index=[str(i).strip() for i in planon.index]
master_loggerscontrollers.index=[str(i).strip() for i in master_loggerscontrollers.index]
master_meterssensors.index=[str(i).strip() for i in master_meterssensors.index]
```
Drop duplicates (shouldn't be any)
```
planon.drop_duplicates(inplace=True)
master_loggerscontrollers.drop_duplicates(inplace=True)
master_meterssensors.drop_duplicates(inplace=True)
```
Split Planon import into loggers and meters
Drop duplicates (shouldn't be any)
```
# Split the Planon file into 2, one for loggers & controllers, and one for meters & sensors.
planon_loggerscontrollers = planon.loc[(planon['Classification Group'] == 'EN.EN4 BMS Controller') | (planon['Classification Group'] == 'EN.EN1 Data Logger')]
planon_meterssensors = planon.loc[(planon['Classification Group'] == 'EN.EN2 Energy Meter') | (planon['Classification Group'] == 'EN.EN3 Energy Sensor')]
planon_loggerscontrollers.drop_duplicates(inplace=True)
planon_meterssensors.drop_duplicates(inplace=True)
```
Index unique? show number of duplicates in index
```
len(planon_loggerscontrollers.index[planon_loggerscontrollers.index.duplicated()])
len(planon_meterssensors.index[planon_meterssensors.index.duplicated()])
```
Meters are not unique. This is because of the spaces served. This is OK for now; we will deal with duplicates at the comparison stage. The same is true for loggers - in the unlikely event that there are duplicates in the future.
```
planon_meterssensors.head(3)
```
## 2. Validation
Create list of all buildings present in Planon export. These are buildings to check the data against from Master.
```
buildings=set(planon_meterssensors['BuildingNo.'])
buildings
len(buildings)
```
### 2.1. Meters
Create dataframe slice for validation from `master_meterssensors` where the only the buildings located in `buildings` are contained. Save this new slice into `master_meterssensors_for_validation`. This is done by creating sub-slices of the dataframe for each building, then concatenating them all together.
```
master_meterssensors_for_validation = \
pd.concat([master_meterssensors.loc[master_meterssensors['Building Code'] == building] \
for building in buildings])
master_meterssensors_for_validation.head(2)
#alternative method
master_meterssensors_for_validation2 = \
master_meterssensors[master_meterssensors['Building Code'].isin(buildings)]
master_meterssensors_for_validation2.head(2)
```
Planon sensors are not unique because of the spaces-served convention in the two data architectures. The Planon architecture devotes a new line to each space served - hence the non-unique index. The Master architecture lists all the spaces only once, as a list, and therefore has a unique index. We will need to take this into account and create a matching dataframe out of Planon for comparison, with a unique index.
```
len(master_meterssensors_for_validation)
len(planon_meterssensors)-len(planon_meterssensors.index[planon_meterssensors.index.duplicated()])
```
Sort datasets after index for easier comparison.
```
master_meterssensors_for_validation.sort_index(inplace=True)
planon_meterssensors.sort_index(inplace=True)
```
#### 2.1.1 Slicing of meters to only certain columns of comparison
```
planon_meterssensors.T
master_meterssensors_for_validation.T
```
Create dictionary that maps Planon column names onto Master.
From Nicola:
- Code (Asset Code)
- Description
- EIS ID (Channel)
- Utility Type
- Fiscal Meter
- Tenant Meter
`Building code` and `Building name` are implicitly included. `Logger Serial Number`, `IP` or `MAC` would be essential to include, as well as `Make` and `Model`. `Additional Location Info` is not essential but would be useful to have. Locations (`Locations.Space.Space number` and `Space Name`) are included in the Planon export - but this is their only viable data source, and they are therefore not validated against.
```
#Planon:Master
meters_match_dict={
"BuildingNo.":"Building Code",
"Building":"Building Name",
"Description":"Description",
"EIS ID":"Logger Channel",
"Tenant Meter.Name":"Tenant meter",
"Fiscal Meter.Name":"Fiscal meter",
"Code":"Code"
}
```
Filter both dataframes down to these columns, then remove duplicates. Currently this loses the information about spaces served, but it also gives a unique index for the Planon dataframe, bringing the two dataframes closer to each other. When including spaces explicitly in the comparison (if we want to - or we could just trust the Planon space mapping), this needs to be modified.
```
master_meterssensors_for_validation_filtered=master_meterssensors_for_validation[list(meters_match_dict.values())]
planon_meterssensors_filtered=planon_meterssensors[list(meters_match_dict.keys())]
master_meterssensors_for_validation_filtered.head(2)
planon_meterssensors_filtered.head(2)
```
Unify headers and drop duplicates (bear in mind the spaces argument; this is where it needs to be brought back in in the future!).
```
planon_meterssensors_filtered.columns=[meters_match_dict[i] for i in planon_meterssensors_filtered]
planon_meterssensors_filtered.drop_duplicates(inplace=True)
master_meterssensors_for_validation_filtered.drop_duplicates(inplace=True)
planon_meterssensors_filtered.head(2)
```
The Fiscal/Tenant meter flags need converting from Yes/No and 1/0 to booleans.
```
planon_meterssensors_filtered['Fiscal meter']=planon_meterssensors_filtered['Fiscal meter'].isin(['Yes'])
planon_meterssensors_filtered['Tenant meter']=planon_meterssensors_filtered['Tenant meter'].isin(['Yes'])
master_meterssensors_for_validation_filtered['Fiscal meter']=master_meterssensors_for_validation_filtered['Fiscal meter'].isin([1])
master_meterssensors_for_validation_filtered['Tenant meter']=master_meterssensors_for_validation_filtered['Tenant meter'].isin([1])
master_meterssensors_for_validation_filtered.head(2)
planon_meterssensors_filtered.head(2)
```
Cross-check missing meters
```
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
meterssensors_not_in_planon.append(i)
print('\n\nMeters in Master, but not in Planon:',
len(meterssensors_not_in_planon),'/',len(b),':',
round(len(meterssensors_not_in_planon)/len(b)*100,3),'%')
(set([i[:5] for i in meterssensors_not_in_planon]))
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
meterssensors_not_in_master.append(i)
print('\n\nMeters in Planon, not in Master:',
len(meterssensors_not_in_master),'/',len(a),':',
round(len(meterssensors_not_in_master)/len(a)*100,3),'%')
len(set([i for i in meterssensors_not_in_master]))
set([i[:9] for i in meterssensors_not_in_master])
set([i[:5] for i in meterssensors_not_in_master])
```
Check for duplicates in the index that are not duplicates over the entire row
```
print(len(planon_meterssensors_filtered.index))
print(len(set(planon_meterssensors_filtered.index)))
print(len(master_meterssensors_for_validation_filtered.index))
print(len(set(master_meterssensors_for_validation_filtered.index)))
master_meterssensors_for_validation_filtered[master_meterssensors_for_validation_filtered.index.duplicated()]
```
The duplicates are the `nan`s. Remove these for now. Could revisit later to do an index-less comparison, only over row contents.
```
good_index=[i for i in master_meterssensors_for_validation_filtered.index if str(i).lower().strip()!='nan']
master_meterssensors_for_validation_filtered=master_meterssensors_for_validation_filtered.loc[good_index]
master_meterssensors_for_validation_filtered.drop_duplicates(inplace=True)
len(planon_meterssensors_filtered)
len(master_meterssensors_for_validation_filtered)
```
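For reference, the index-less comparison mentioned above could be sketched with an outer merge on the row contents themselves; this is an illustration of the idea only, and the validation below keeps using the index.
```
# Compare on row contents only: the merge indicator shows rows present in one source but not the other.
contents_only = pd.merge(
    planon_meterssensors_filtered.reset_index(drop=True),
    master_meterssensors_for_validation_filtered.reset_index(drop=True),
    how='outer', indicator=True)
contents_only['_merge'].value_counts()
```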
Do the comparison only on common indices. We need to revisit and identify the cause of the missing meters, both ways (5 Planon->Meters and 30 Meters->Planon in this example).
```
comon_index=list(set(master_meterssensors_for_validation_filtered.index).intersection(set(planon_meterssensors_filtered.index)))
len(comon_index)
master_meterssensors_for_validation_intersected=master_meterssensors_for_validation_filtered.loc[comon_index].sort_index()
planon_meterssensors_intersected=planon_meterssensors_filtered.loc[comon_index].sort_index()
len(master_meterssensors_for_validation_intersected)
len(planon_meterssensors_intersected)
```
We still have duplicate indices. For now we just drop duplicates and keep the first occurrence.
```
master_meterssensors_for_validation_intersected = master_meterssensors_for_validation_intersected[~master_meterssensors_for_validation_intersected.index.duplicated(keep='first')]
master_meterssensors_for_validation_intersected.head(2)
planon_meterssensors_intersected.head(2)
```
#### 2.1.2. Primitive comparison
```
planon_meterssensors_intersected==master_meterssensors_for_validation_intersected
np.all(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected)
```
#### 2.1.3. Horizontal comparison
Number of cells matching
```
(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()
```
Percentage matching
```
(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100
((planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100).plot(kind='bar')
```
#### 2.1.4. Vertical comparison
```
df=pd.DataFrame((planon_meterssensors_intersected.T==master_meterssensors_for_validation_intersected.T).sum())
df
df=pd.DataFrame((planon_meterssensors_intersected.T==master_meterssensors_for_validation_intersected.T).sum()/\
len(planon_meterssensors_intersected.T)*100)
df[df[0]<100]
```
#### 2.1.5. Smart(er) comparison
Not all of the dataframe matches. Let us do some basic string formatting, maybe that helps.
```
sum(planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description'])
planon_meterssensors_intersected['Description']=[str(s).lower().strip()\
.replace(' ',' ').replace(' ',' ').replace('nan','')\
for s in planon_meterssensors_intersected['Description'].values]
master_meterssensors_for_validation_intersected['Description']=[str(s).lower().strip()\
.replace(' ',' ').replace(' ',' ').replace('nan','')\
for s in master_meterssensors_for_validation_intersected['Description'].values]
sum(planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description'])
```
Some errors fixed, some left. Let's see which ones. These are either:
- Wrong duplicate dropped
- Input human errors in the description.
- Actual errors somewhere in the indexing.
```
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description']].index:
print(i,'\t\tPlanon:',planon_meterssensors_intersected.loc[i]['Description'],'\t\tMaster:',master_meterssensors_for_validation_intersected.loc[i]['Description'])
```
Let us repeat the exercise for `Logger Channel`. Cross-validate, and flag as a highly likely error where both fields mismatch.
```
sum(planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel'])
planon_meterssensors_intersected['Logger Channel']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in planon_meterssensors_intersected['Logger Channel'].values]
master_meterssensors_for_validation_intersected['Logger Channel']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in master_meterssensors_for_validation_intersected['Logger Channel'].values]
sum(planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel'])
```
All errors fixed on logger channels.
```
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel']].index:
print(i,'\t\tPlanon:',planon_meterssensors_intersected.loc[i]['Logger Channel'],'\t\tMaster:',master_meterssensors_for_validation_intersected.loc[i]['Logger Channel'])
```
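The double-mismatch flag described above is not computed in the cell; a minimal sketch of it, reusing the two intersected meter frames from earlier cells (the `both_mismatch` name is illustrative, and with all channel errors fixed above the flag should come back empty here, but the pattern generalises):
```
# Rows where BOTH the Description and the Logger Channel disagree between
# Planon and Master are the most likely to be genuine data errors.
desc_mismatch = planon_meterssensors_intersected['Description'] != \
                master_meterssensors_for_validation_intersected['Description']
chan_mismatch = planon_meterssensors_intersected['Logger Channel'] != \
                master_meterssensors_for_validation_intersected['Logger Channel']
both_mismatch = planon_meterssensors_intersected[desc_mismatch & chan_mismatch]
both_mismatch.index
```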
New error percentage:
```
(planon_meterssensors_intersected!=master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100
```
### 2.2. Loggers
```
buildings=set(planon_loggerscontrollers['BuildingNo.'])
buildings
master_loggerscontrollers_for_validation = \
pd.concat([master_loggerscontrollers.loc[master_loggerscontrollers['Building Code'] == building] \
for building in buildings])
master_loggerscontrollers_for_validation.head(2)
len(master_loggerscontrollers_for_validation)
len(planon_loggerscontrollers)-len(planon_loggerscontrollers.index[planon_loggerscontrollers.index.duplicated()])
master_loggerscontrollers_for_validation.sort_index(inplace=True)
planon_loggerscontrollers.sort_index(inplace=True)
planon_loggerscontrollers.T
master_loggerscontrollers_for_validation.T
```
Create dictionary that maps Planon column names onto Master.
From Nicola:
- EIS ID (Serial Number)
- Make
- Model
- Description
- Code (Asset Code)
- Building Code
`Building code` and `Building name` are implicitly included. `Logger IP` or `MAC` would be essential to include, as would `Make` and `Model`. `Additional Location Info` is not essential but would be useful to have. Locations (`Locations.Space.Space number` and `Space Name`) are included in the Planon export - but Planon is their only viable data source, therefore they are not validated against.
```
#Planon:Master
loggers_match_dict={
"BuildingNo.":"Building Code",
"Building":"Building Name",
"Description":"Description",
"EIS ID":"Logger Serial Number",
"Make":"Make",
"Model":"Model",
"Code":"Code"
}
master_loggerscontrollers_for_validation_filtered=master_loggerscontrollers_for_validation[list(loggers_match_dict.values())]
planon_loggerscontrollers_filtered=planon_loggerscontrollers[list(loggers_match_dict.keys())]
master_loggerscontrollers_for_validation_filtered.head(2)
planon_loggerscontrollers_filtered.head(2)
planon_loggerscontrollers_filtered.columns=[loggers_match_dict[i] for i in planon_loggerscontrollers_filtered]
planon_loggerscontrollers_filtered.drop_duplicates(inplace=True)
master_loggerscontrollers_for_validation_filtered.drop_duplicates(inplace=True)
planon_loggerscontrollers_filtered.head(2)
master_loggerscontrollers_for_validation_filtered.head(2)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
loggerscontrollers_not_in_planon.append(i)
print('\n\nLoggers in Master, but not in Planon:',
len(loggerscontrollers_not_in_planon),'/',len(b),':',
round(len(loggerscontrollers_not_in_planon)/len(b)*100,3),'%')
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
loggerscontrollers_not_in_master.append(i)
print('\n\nLoggers in Planon, not in Master:',
len(loggerscontrollers_not_in_master),'/',len(a),':',
round(len(loggerscontrollers_not_in_master)/len(a)*100,3),'%')
print(len(planon_loggerscontrollers_filtered.index))
print(len(set(planon_loggerscontrollers_filtered.index)))
print(len(master_loggerscontrollers_for_validation_filtered.index))
print(len(set(master_loggerscontrollers_for_validation_filtered.index)))
master_loggerscontrollers_for_validation_filtered[master_loggerscontrollers_for_validation_filtered.index.duplicated()]
comon_index=list(set(master_loggerscontrollers_for_validation_filtered.index).intersection(set(planon_loggerscontrollers_filtered.index)))
master_loggerscontrollers_for_validation_intersected=master_loggerscontrollers_for_validation_filtered.loc[comon_index].sort_index()
planon_loggerscontrollers_intersected=planon_loggerscontrollers_filtered.loc[comon_index].sort_index()
master_loggerscontrollers_for_validation_intersected.head(2)
planon_loggerscontrollers_intersected.head(2)
planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected
```
Number of matching cells, per column
```
(planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()
```
Percentage of matching cells, per column
```
(planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100
((planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100).plot(kind='bar')
```
Loggers not matching on `Building Name`.
```
sum(planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name'])
planon_loggerscontrollers_intersected['Building Name']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in planon_loggerscontrollers_intersected['Building Name'].values]
master_loggerscontrollers_for_validation_intersected['Building Name']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in master_loggerscontrollers_for_validation_intersected['Building Name'].values]
sum(planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name'])
```
That didn't help.
```
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Building Name'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name'])
```
A follow-up lexical-distance comparison would flag this pair as a match; a sketch of one is below.
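A minimal sketch of such a comparison, using only the standard library's `difflib`; the 0.8 similarity threshold is an arbitrary illustration:
```
from difflib import SequenceMatcher

# Flag building-name pairs that differ textually but are lexically very close;
# these are almost certainly the same building spelled differently.
name_mismatch = planon_loggerscontrollers_intersected['Building Name'] != \
                master_loggerscontrollers_for_validation_intersected['Building Name']
for i in planon_loggerscontrollers_intersected[name_mismatch].index:
    a = str(planon_loggerscontrollers_intersected.loc[i]['Building Name'])
    b = str(master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name'])
    ratio = SequenceMatcher(None, a, b).ratio()
    if ratio > 0.8:
        print(i, 'likely match (similarity %.2f):' % ratio, a, '<->', b)
```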
Loggers not matching on `Serial Number`.
```
sum(planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number'])
planon_loggerscontrollers_intersected['Logger Serial Number']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ').replace('{','').replace('}','') for s in planon_loggerscontrollers_intersected['Logger Serial Number'].values]
master_loggerscontrollers_for_validation_intersected['Logger Serial Number']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ').replace('{','').replace('}','') for s in master_loggerscontrollers_for_validation_intersected['Logger Serial Number'].values]
sum(planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number'])
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
```
Technically the same, but there is a number format error. Compare based on float value; if the values match, replace one of them. This needs to be amended, as it will throw a `cannot convert to float` exception if non-numeric strings are left in from the previous step (a safer variant is sketched after the next cell).
```
z1=[]
z2=[]
for i in planon_loggerscontrollers_intersected.index:
if planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number']:
if float(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])==\
float(master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number']):
z1.append(str(int(float(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']))))
z2.append(str(int(float(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']))))
else:
z1.append(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])
z2.append(master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
else:
z1.append(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])
z2.append(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])
planon_loggerscontrollers_intersected['Logger Serial Number']=z1
master_loggerscontrollers_for_validation_intersected['Logger Serial Number']=z2
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
```
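A hedged variant of the loop above that does not raise on non-numeric serials; the `same_number` helper is illustrative only:
```
def same_number(a, b):
    # True only if both values parse as floats and are numerically equal.
    try:
        return float(a) == float(b)
    except (TypeError, ValueError):
        return False

z1, z2 = [], []
for i in planon_loggerscontrollers_intersected.index:
    p = planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']
    m = master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number']
    if p != m and same_number(p, m):
        p = m = str(int(float(p)))  # normalise both sides to the same integer string
    z1.append(p)
    z2.append(m)
planon_loggerscontrollers_intersected['Logger Serial Number'] = z1
master_loggerscontrollers_for_validation_intersected['Logger Serial Number'] = z2
```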
New error percentage:
```
(planon_loggerscontrollers_intersected!=master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100
```
(Bearing in mind the above, this is technically 0.)
```
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
meterssensors_not_in_planon.append(i)
print('\n\nMeters in Master, but not in Planon:',
len(meterssensors_not_in_planon),'/',len(b),':',
round(len(meterssensors_not_in_planon)/len(b)*100,3),'%')
q1=pd.DataFrame(meterssensors_not_in_planon)
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
meterssensors_not_in_master.append(i)
print('\n\nMeters in Planon, not in Master:',
len(meterssensors_not_in_master),'/',len(a),':',
round(len(meterssensors_not_in_master)/len(a)*100,3),'%')
q2=pd.DataFrame(meterssensors_not_in_master)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
loggerscontrollers_not_in_planon.append(i)
print('\n\nLoggers in Master, but not in Planon:',
len(loggerscontrollers_not_in_planon),'/',len(b),':',
round(len(loggerscontrollers_not_in_planon)/len(b)*100,3),'%')
q3=pd.DataFrame(loggerscontrollers_not_in_planon)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
loggerscontrollers_not_in_master.append(i)
print('\n\nLoggers in Planon, not in Master:',
len(loggerscontrollers_not_in_master),'/',len(a),':',
round(len(loggerscontrollers_not_in_master)/len(a)*100,3),'%')
q4=pd.DataFrame(loggerscontrollers_not_in_master)
q5=pd.DataFrame((planon_meterssensors_intersected!=master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100)
q6=pd.DataFrame((planon_loggerscontrollers_intersected!=master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100)
w1=[]
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description']].index:
w1.append({"Meter":i,'Planon':planon_meterssensors_intersected.loc[i]['Description'],
'Master':master_meterssensors_for_validation_intersected.loc[i]['Description']})
q7=pd.DataFrame(w1)
w2=[]
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name']].index:
w2.append({"Logger":i,'Planon':planon_loggerscontrollers_intersected.loc[i]['Building Name'],
'Master':master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name']})
q8=pd.DataFrame(w2)
writer = pd.ExcelWriter('final5b.xlsx')
q1.to_excel(writer,'Meters Master, not Planon')
q2.to_excel(writer,'Meters Planon, not Master')
q3.to_excel(writer,'Loggers Master, not Planon')
q4.to_excel(writer,'Loggers Planon, not Master')
q5.to_excel(writer,'Meters error perc')
q6.to_excel(writer,'Loggers error perc')
q7.to_excel(writer,'Meters naming conflicts')
q1
q9=[]
try:
for i in q1[0].values:
if i[:i.find('/')] not in set(q3[0].values):
q9.append(i)
except:pass
pd.DataFrame(q9).to_excel(writer,'Meters Master, not Planon, not Logger')
writer.save()
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
planon=pd.read_excel('EIS Assets v2.xlsx',index_col = 'Code')
#master_loggerscontrollers_old = pd.read_csv('LoggersControllers.csv', index_col = 'Asset Code')
#master_meterssensors_old = pd.read_csv('MetersSensors.csv', encoding = 'macroman', index_col = 'Asset Code')
master='MASTER PlanonLoggersAndMeters 17 10 16.xlsx'
master_loggerscontrollers=pd.read_excel(master,sheetname='Loggers Controllers', index_col = 'Asset Code')
master_meterssensors=pd.read_excel(master,sheetname='Meters Sensors', encoding = 'macroman', index_col = 'Asset Code')
planon['Code']=planon.index
master_loggerscontrollers['Code']=master_loggerscontrollers.index
master_meterssensors['Code']=master_meterssensors.index
set(master_meterssensors['Classification Group'])
set(master_loggerscontrollers['Classification Group'])
new_index=[]
for i in master_meterssensors.index:
if '/' not in i:
new_index.append(i[:i.find('-')+1]+i[i.find('-')+1:].replace('-','/'))
else:
new_index.append(i)
master_meterssensors.index=new_index
master_meterssensors['Code']=master_meterssensors.index
new_index=[]
for i in master_meterssensors.index:
logger=i[:i.find('/')]
if master_loggerscontrollers.loc[logger]['Classification Group']=='BMS controller':
meter=i[i.find('/')+1:]
if meter[0] not in {'N','n','o','i'}:
new_index.append(i)
else:
new_index.append(i)
len(master_meterssensors)
master_meterssensors=master_meterssensors.loc[new_index]
len(master_meterssensors)
master_meterssensors.to_csv('meterssensors.csv')
master_loggerscontrollers.to_csv('loggerscontrollers.csv')
planon.index=[str(i).strip() for i in planon.index]
master_loggerscontrollers.index=[str(i).strip() for i in master_loggerscontrollers.index]
master_meterssensors.index=[str(i).strip() for i in master_meterssensors.index]
planon.drop_duplicates(inplace=True)
master_loggerscontrollers.drop_duplicates(inplace=True)
master_meterssensors.drop_duplicates(inplace=True)
# Split the Planon file into 2, one for loggers & controllers, and one for meters & sensors.
planon_loggerscontrollers = planon.loc[(planon['Classification Group'] == 'EN.EN4 BMS Controller') | (planon['Classification Group'] == 'EN.EN1 Data Logger')]
planon_meterssensors = planon.loc[(planon['Classification Group'] == 'EN.EN2 Energy Meter') | (planon['Classification Group'] == 'EN.EN3 Energy Sensor')]
planon_loggerscontrollers.drop_duplicates(inplace=True)
planon_meterssensors.drop_duplicates(inplace=True)
len(planon_loggerscontrollers.index[planon_loggerscontrollers.index.duplicated()])
len(planon_meterssensors.index[planon_meterssensors.index.duplicated()])
planon_meterssensors.head(3)
buildings=set(planon_meterssensors['BuildingNo.'])
buildings
len(buildings)
master_meterssensors_for_validation = \
pd.concat([master_meterssensors.loc[master_meterssensors['Building Code'] == building] \
for building in buildings])
master_meterssensors_for_validation.head(2)
#alternative method
master_meterssensors_for_validation2 = \
master_meterssensors[master_meterssensors['Building Code'].isin(buildings)]
master_meterssensors_for_validation2.head(2)
len(master_meterssensors_for_validation)
len(planon_meterssensors)-len(planon_meterssensors.index[planon_meterssensors.index.duplicated()])
master_meterssensors_for_validation.sort_index(inplace=True)
planon_meterssensors.sort_index(inplace=True)
planon_meterssensors.T
master_meterssensors_for_validation.T
#Planon:Master
meters_match_dict={
"BuildingNo.":"Building Code",
"Building":"Building Name",
"Description":"Description",
"EIS ID":"Logger Channel",
"Tenant Meter.Name":"Tenant meter",
"Fiscal Meter.Name":"Fiscal meter",
"Code":"Code"
}
master_meterssensors_for_validation_filtered=master_meterssensors_for_validation[list(meters_match_dict.values())]
planon_meterssensors_filtered=planon_meterssensors[list(meters_match_dict.keys())]
master_meterssensors_for_validation_filtered.head(2)
planon_meterssensors_filtered.head(2)
planon_meterssensors_filtered.columns=[meters_match_dict[i] for i in planon_meterssensors_filtered]
planon_meterssensors_filtered.drop_duplicates(inplace=True)
master_meterssensors_for_validation_filtered.drop_duplicates(inplace=True)
planon_meterssensors_filtered.head(2)
planon_meterssensors_filtered['Fiscal meter']=planon_meterssensors_filtered['Fiscal meter'].isin(['Yes'])
planon_meterssensors_filtered['Tenant meter']=planon_meterssensors_filtered['Tenant meter'].isin(['Yes'])
master_meterssensors_for_validation_filtered['Fiscal meter']=master_meterssensors_for_validation_filtered['Fiscal meter'].isin([1])
master_meterssensors_for_validation_filtered['Tenant meter']=master_meterssensors_for_validation_filtered['Tenant meter'].isin([1])
master_meterssensors_for_validation_filtered.head(2)
planon_meterssensors_filtered.head(2)
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
meterssensors_not_in_planon.append(i)
print('\n\nMeters in Master, but not in Planon:',
len(meterssensors_not_in_planon),'/',len(b),':',
round(len(meterssensors_not_in_planon)/len(b)*100,3),'%')
(set([i[:5] for i in meterssensors_not_in_planon]))
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
meterssensors_not_in_master.append(i)
print('\n\nMeters in Planon, not in Master:',
len(meterssensors_not_in_master),'/',len(a),':',
round(len(meterssensors_not_in_master)/len(a)*100,3),'%')
len(set([i for i in meterssensors_not_in_master]))
set([i[:9] for i in meterssensors_not_in_master])
set([i[:5] for i in meterssensors_not_in_master])
print(len(planon_meterssensors_filtered.index))
print(len(set(planon_meterssensors_filtered.index)))
print(len(master_meterssensors_for_validation_filtered.index))
print(len(set(master_meterssensors_for_validation_filtered.index)))
master_meterssensors_for_validation_filtered[master_meterssensors_for_validation_filtered.index.duplicated()]
good_index=[i for i in master_meterssensors_for_validation_filtered.index if str(i).lower().strip()!='nan']
master_meterssensors_for_validation_filtered=master_meterssensors_for_validation_filtered.loc[good_index]
master_meterssensors_for_validation_filtered.drop_duplicates(inplace=True)
len(planon_meterssensors_filtered)
len(master_meterssensors_for_validation_filtered)
comon_index=list(set(master_meterssensors_for_validation_filtered.index).intersection(set(planon_meterssensors_filtered.index)))
len(comon_index)
master_meterssensors_for_validation_intersected=master_meterssensors_for_validation_filtered.loc[comon_index].sort_index()
planon_meterssensors_intersected=planon_meterssensors_filtered.loc[comon_index].sort_index()
len(master_meterssensors_for_validation_intersected)
len(planon_meterssensors_intersected)
master_meterssensors_for_validation_intersected = master_meterssensors_for_validation_intersected[~master_meterssensors_for_validation_intersected.index.duplicated(keep='first')]
master_meterssensors_for_validation_intersected.head(2)
planon_meterssensors_intersected.head(2)
planon_meterssensors_intersected==master_meterssensors_for_validation_intersected
np.all(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected)
(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()
(planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100
((planon_meterssensors_intersected==master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100).plot(kind='bar')
df=pd.DataFrame((planon_meterssensors_intersected.T==master_meterssensors_for_validation_intersected.T).sum())
df
df=pd.DataFrame((planon_meterssensors_intersected.T==master_meterssensors_for_validation_intersected.T).sum()/\
len(planon_meterssensors_intersected.T)*100)
df[df[0]<100]
sum(planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description'])
planon_meterssensors_intersected['Description']=[str(s).lower().strip()\
.replace(' ',' ').replace(' ',' ').replace('nan','')\
for s in planon_meterssensors_intersected['Description'].values]
master_meterssensors_for_validation_intersected['Description']=[str(s).lower().strip()\
.replace(' ',' ').replace(' ',' ').replace('nan','')\
for s in master_meterssensors_for_validation_intersected['Description'].values]
sum(planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description'])
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description']].index:
print(i,'\t\tPlanon:',planon_meterssensors_intersected.loc[i]['Description'],'\t\tMaster:',master_meterssensors_for_validation_intersected.loc[i]['Description'])
sum(planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel'])
planon_meterssensors_intersected['Logger Channel']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in planon_meterssensors_intersected['Logger Channel'].values]
master_meterssensors_for_validation_intersected['Logger Channel']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in master_meterssensors_for_validation_intersected['Logger Channel'].values]
sum(planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel'])
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Logger Channel']!=master_meterssensors_for_validation_intersected['Logger Channel']].index:
print(i,'\t\tPlanon:',planon_meterssensors_intersected.loc[i]['Logger Channel'],'\t\tMaster:',master_meterssensors_for_validation_intersected.loc[i]['Logger Channel'])
(planon_meterssensors_intersected!=master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100
buildings=set(planon_loggerscontrollers['BuildingNo.'])
buildings
master_loggerscontrollers_for_validation = \
pd.concat([master_loggerscontrollers.loc[master_loggerscontrollers['Building Code'] == building] \
for building in buildings])
master_loggerscontrollers_for_validation.head(2)
len(master_loggerscontrollers_for_validation)
len(planon_loggerscontrollers)-len(planon_loggerscontrollers.index[planon_loggerscontrollers.index.duplicated()])
master_loggerscontrollers_for_validation.sort_index(inplace=True)
planon_loggerscontrollers.sort_index(inplace=True)
planon_loggerscontrollers.T
master_loggerscontrollers_for_validation.T
#Planon:Master
loggers_match_dict={
"BuildingNo.":"Building Code",
"Building":"Building Name",
"Description":"Description",
"EIS ID":"Logger Serial Number",
"Make":"Make",
"Model":"Model",
"Code":"Code"
}
master_loggerscontrollers_for_validation_filtered=master_loggerscontrollers_for_validation[list(loggers_match_dict.values())]
planon_loggerscontrollers_filtered=planon_loggerscontrollers[list(loggers_match_dict.keys())]
master_loggerscontrollers_for_validation_filtered.head(2)
planon_loggerscontrollers_filtered.head(2)
planon_loggerscontrollers_filtered.columns=[loggers_match_dict[i] for i in planon_loggerscontrollers_filtered]
planon_loggerscontrollers_filtered.drop_duplicates(inplace=True)
master_loggerscontrollers_for_validation_filtered.drop_duplicates(inplace=True)
planon_loggerscontrollers_filtered.head(2)
master_loggerscontrollers_for_validation_filtered.head(2)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
loggerscontrollers_not_in_planon.append(i)
print('\n\nLoggers in Master, but not in Planon:',
len(loggerscontrollers_not_in_planon),'/',len(b),':',
round(len(loggerscontrollers_not_in_planon)/len(b)*100,3),'%')
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
loggerscontrollers_not_in_master.append(i)
print('\n\nLoggers in Planon, not in Master:',
len(loggerscontrollers_not_in_master),'/',len(a),':',
round(len(loggerscontrollers_not_in_master)/len(a)*100,3),'%')
print(len(planon_loggerscontrollers_filtered.index))
print(len(set(planon_loggerscontrollers_filtered.index)))
print(len(master_loggerscontrollers_for_validation_filtered.index))
print(len(set(master_loggerscontrollers_for_validation_filtered.index)))
master_loggerscontrollers_for_validation_filtered[master_loggerscontrollers_for_validation_filtered.index.duplicated()]
comon_index=list(set(master_loggerscontrollers_for_validation_filtered.index).intersection(set(planon_loggerscontrollers_filtered.index)))
master_loggerscontrollers_for_validation_intersected=master_loggerscontrollers_for_validation_filtered.loc[comon_index].sort_index()
planon_loggerscontrollers_intersected=planon_loggerscontrollers_filtered.loc[comon_index].sort_index()
master_loggerscontrollers_for_validation_intersected.head(2)
planon_loggerscontrollers_intersected.head(2)
planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected
(planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()
(planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100
((planon_loggerscontrollers_intersected==master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100).plot(kind='bar')
sum(planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name'])
planon_loggerscontrollers_intersected['Building Name']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in planon_loggerscontrollers_intersected['Building Name'].values]
master_loggerscontrollers_for_validation_intersected['Building Name']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ') for s in master_loggerscontrollers_for_validation_intersected['Building Name'].values]
sum(planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name'])
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Building Name'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name'])
sum(planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number'])
planon_loggerscontrollers_intersected['Logger Serial Number']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ').replace('{','').replace('}','') for s in planon_loggerscontrollers_intersected['Logger Serial Number'].values]
master_loggerscontrollers_for_validation_intersected['Logger Serial Number']=[str(s).lower().strip().replace(' ',' ').replace(' ',' ').replace('{','').replace('}','') for s in master_loggerscontrollers_for_validation_intersected['Logger Serial Number'].values]
sum(planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number'])
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
z1=[]
z2=[]
for i in planon_loggerscontrollers_intersected.index:
if planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number']:
if float(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])==\
float(master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number']):
z1.append(str(int(float(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']))))
z2.append(str(int(float(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number']))))
else:
z1.append(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])
z2.append(master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
else:
z1.append(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])
z2.append(planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'])
planon_loggerscontrollers_intersected['Logger Serial Number']=z1
master_loggerscontrollers_for_validation_intersected['Logger Serial Number']=z2
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Logger Serial Number']!=master_loggerscontrollers_for_validation_intersected['Logger Serial Number']].index:
print(i,'\t\tPlanon:',planon_loggerscontrollers_intersected.loc[i]['Logger Serial Number'],'\t\tMaster:',master_loggerscontrollers_for_validation_intersected.loc[i]['Logger Serial Number'])
(planon_loggerscontrollers_intersected!=master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
meterssensors_not_in_planon.append(i)
print('\n\nMeters in Master, but not in Planon:',
len(meterssensors_not_in_planon),'/',len(b),':',
round(len(meterssensors_not_in_planon)/len(b)*100,3),'%')
q1=pd.DataFrame(meterssensors_not_in_planon)
a=np.sort(list(set(planon_meterssensors_filtered.index)))
b=np.sort(list(set(master_meterssensors_for_validation_filtered.index)))
meterssensors_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
meterssensors_not_in_master.append(i)
print('\n\nMeters in Planon, not in Master:',
len(meterssensors_not_in_master),'/',len(a),':',
round(len(meterssensors_not_in_master)/len(a)*100,3),'%')
q2=pd.DataFrame(meterssensors_not_in_master)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_planon=[]
for i in b:
if i not in a:
print(i+',',end=" "),
loggerscontrollers_not_in_planon.append(i)
print('\n\nLoggers in Master, but not in Planon:',
len(loggerscontrollers_not_in_planon),'/',len(b),':',
round(len(loggerscontrollers_not_in_planon)/len(b)*100,3),'%')
q3=pd.DataFrame(loggerscontrollers_not_in_planon)
a=np.sort(list(set(planon_loggerscontrollers_filtered.index)))
b=np.sort(list(set(master_loggerscontrollers_for_validation_filtered.index)))
loggerscontrollers_not_in_master=[]
for i in a:
if i not in b:
print(i+',',end=" "),
loggerscontrollers_not_in_master.append(i)
print('\n\nLoggers in Planon, not in Master:',
len(loggerscontrollers_not_in_master),'/',len(a),':',
round(len(loggerscontrollers_not_in_master)/len(a)*100,3),'%')
q4=pd.DataFrame(loggerscontrollers_not_in_master)
q5=pd.DataFrame((planon_meterssensors_intersected!=master_meterssensors_for_validation_intersected).sum()/\
len(planon_meterssensors_intersected)*100)
q6=pd.DataFrame((planon_loggerscontrollers_intersected!=master_loggerscontrollers_for_validation_intersected).sum()/\
len(planon_loggerscontrollers_intersected)*100)
w1=[]
for i in planon_meterssensors_intersected[planon_meterssensors_intersected['Description']!=master_meterssensors_for_validation_intersected['Description']].index:
w1.append({"Meter":i,'Planon':planon_meterssensors_intersected.loc[i]['Description'],
'Master':master_meterssensors_for_validation_intersected.loc[i]['Description']})
q7=pd.DataFrame(w1)
w2=[]
for i in planon_loggerscontrollers_intersected[planon_loggerscontrollers_intersected['Building Name']!=master_loggerscontrollers_for_validation_intersected['Building Name']].index:
w2.append({"Logger":i,'Planon':planon_loggerscontrollers_intersected.loc[i]['Building Name'],
'Master':master_loggerscontrollers_for_validation_intersected.loc[i]['Building Name']})
q8=pd.DataFrame(w2)
writer = pd.ExcelWriter('final5b.xlsx')
q1.to_excel(writer,'Meters Master, not Planon')
q2.to_excel(writer,'Meters Planon, not Master')
q3.to_excel(writer,'Loggers Master, not Planon')
q4.to_excel(writer,'Loggers Planon, not Master')
q5.to_excel(writer,'Meters error perc')
q6.to_excel(writer,'Loggers error perc')
q7.to_excel(writer,'Meters naming conflicts')
q1
q9=[]
try:
for i in q1[0].values:
if i[:i.find('/')] not in set(q3[0].values):
q9.append(i)
except:pass
pd.DataFrame(q9).to_excel(writer,'Meters Master, not Planon, not Logger')
writer.save()
| 0.229535 | 0.898277 |
```
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import io, base64, os, json, re
import pandas as pd
import numpy as np
import datetime
import warnings
warnings.filterwarnings('ignore')
# path_to_market_data = '/dataset/^GSPC.csv'
# create MACD indicator columns
def MACD(df, feature, fast=9, medium=12, long=26):
# WARNING - Feed data in ascending order only (i.e. first row should be your oldest print)
tmp_df = df.copy()
tmp_df['EXP1'] = tmp_df[feature].ewm(span=medium, adjust=False).mean()
tmp_df['EXP2'] = tmp_df[feature].ewm(span=long, adjust=False).mean()
tmp_df['MACD'] = tmp_df['EXP1']-tmp_df['EXP2']
tmp_df['EXP3'] = tmp_df['MACD'].ewm(span=fast, adjust=False).mean()
tmp_df['MACD_Hist'] = tmp_df['MACD'] - tmp_df['EXP3']
return tmp_df
# S&P 500
sp500_df = pd.read_csv('dataset/GSPC.csv')
sp500_df['Date'] = pd.to_datetime(sp500_df['Date'])
sp500_df = sp500_df[['Date','Adj Close']]
sp500_df.columns = ['Date','SP500 Close'] # create 2 columns for our table
print(np.min(sp500_df['Date']), np.max(sp500_df['Date']))
sp500_df = sp500_df.sort_values('Date',ascending=True) # sort by date, ascending
sp500_df.tail()
# Case-Shiller U.S. National Home Price Index (CSUSHPINSA)
caseshill_df = pd.read_csv('dataset/CSUSHPINSA.csv')
caseshill_df['DATE'] = pd.to_datetime(caseshill_df['DATE'])
caseshill_df.columns = ['Date', 'Case-Shiller'] # create 2 columns for our table
caseshill_df = caseshill_df.sort_values('Date')
print('caseshill_df:',np.min(caseshill_df['Date']),np.max(caseshill_df['Date']))
caseshill_df.tail()
# Average Hourly Earnings of Production and Nonsupervisory Employees
avghourly_df = pd.read_csv('dataset/AHETPI.csv')
avghourly_df['DATE'] = pd.to_datetime(avghourly_df['DATE'])
avghourly_df.columns = ['Date', 'AHETPI']
avghourly_df = avghourly_df.sort_values('Date')
print('avghourly_df:',np.min(avghourly_df['Date']),np.max(avghourly_df['Date']))
avghourly_df.tail()
# Consumer Price Index for All Urban Consumers
cpi_df = pd.read_csv('dataset/CUSR0000SEHC.csv')
cpi_df['DATE'] = pd.to_datetime(cpi_df['DATE'])
cpi_df.columns = ['Date', 'CPI']
cpi_df = cpi_df.sort_values('Date')
print('cpi_df:', np.min(cpi_df['Date']), np.max(cpi_df['Date']))
cpi_df.tail()
```
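The `MACD` helper defined above is never called in this notebook; a quick, illustrative use on the S&P 500 frame loaded above (default 9/12/26 spans kept) would be:
```
# Compute MACD columns on the S&P 500 adjusted close; the data is already
# sorted in ascending date order, as the helper requires.
sp500_macd = MACD(sp500_df, 'SP500 Close')
sp500_macd[['Date', 'SP500 Close', 'MACD', 'MACD_Hist']].tail()
```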
## Now to plot all datasets
```
cut_off_date = '1988-01-01'
sp500_df_tmp = sp500_df.copy() # create copy of df
caseshill_df_tmp = caseshill_df.copy() # create copy of df
avghourly_df_tmp = avghourly_df.copy() # create copy of df
cpi_df_tmp = cpi_df.copy() # create copy of df
sp500_df_tmp = sp500_df_tmp[sp500_df_tmp['Date'] >= cut_off_date]
caseshill_df_tmp = caseshill_df_tmp[caseshill_df_tmp['Date'] >= cut_off_date]
avghourly_df_tmp = avghourly_df_tmp[avghourly_df_tmp['Date'] >= cut_off_date]
cpi_df_tmp = cpi_df_tmp[cpi_df_tmp['Date'] >= cut_off_date]
# plot every series on one date axis, each on its own y-scale
fig, ax = plt.subplots(figsize=(16, 8))
plt.plot(sp500_df_tmp['Date'], sp500_df_tmp['SP500 Close'] , color='blue', label='SP500_Close')
plt.title('SP500, Case-Shiller, AHETPI, CPI')
plt.grid()
# get new axis
ax2 = ax.twinx()
plt.plot(caseshill_df_tmp['Date'], caseshill_df_tmp['Case-Shiller'] , color='red', label='Case-Shiller')
# get new axis
ax3 = ax.twinx()
plt.plot(avghourly_df_tmp['Date'], avghourly_df_tmp['AHETPI'] , color='green', label='AHETPI')
# get new axis
ax4 = ax.twinx()
plt.plot(cpi_df_tmp['Date'], cpi_df_tmp['CPI'] , color='brown', label='CPI')
plt.show()
```
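The twinned axes above set labels but never draw a legend; one way to collect the handles from every axis into a single legend, reusing the `fig`/`ax` objects from the previous cell:
```
# Gather the line handles and labels from the base axis and each twin,
# then draw one combined legend on the base axis and re-display the figure.
handles, labels = [], []
for axis in (ax, ax2, ax3, ax4):
    h, l = axis.get_legend_handles_labels()
    handles += h
    labels += l
ax.legend(handles, labels, loc='upper left')
fig
```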
## Adjusting for 2012 = 100
```
sp500_df_tmp[sp500_df_tmp['Date'] == '2012-01-03']
print(100/1277.060059)
sp500_df_tmp['SP500 Close-adj'] = sp500_df_tmp['SP500 Close'] * 0.07830485284952445
caseshill_df_tmp[caseshill_df_tmp['Date'] == '2012-01-01']
print(100/134.164)
caseshill_df_tmp['Case-Shiller-adj'] = caseshill_df_tmp['Case-Shiller'] * 0.7453564294445605
avghourly_df[avghourly_df['Date'] == '2012-01-01']
print(100/19.58)
avghourly_df_tmp['AHETPI-adj'] = avghourly_df_tmp['AHETPI'] * 5.107252298263535
cpi_df[cpi_df['Date'] == '2012-01-01']
print(100/262.366)
cpi_df_tmp['CPI-adj'] = cpi_df_tmp['CPI'] * 0.3811469473940983
# plot all rebased series on a single shared axis
fig, ax = plt.subplots(figsize=(16, 8))
plt.plot(sp500_df_tmp['Date'], sp500_df_tmp['SP500 Close-adj'] , color='blue', label='SP500_Close')
plt.title('SP500, Case-Shiller, AHETPI, CPI - Adjusted to 2012=100')
plt.plot(caseshill_df_tmp['Date'], caseshill_df_tmp['Case-Shiller-adj'] , color='red', label='Case-Shiller')
plt.plot(avghourly_df_tmp['Date'], avghourly_df_tmp['AHETPI-adj'] , color='green', label='AHETPI')
plt.plot(cpi_df_tmp['Date'], cpi_df_tmp['CPI-adj'] , color='brown', label='CPI')
plt.grid()
plt.show()
# VNQ
vnq_df = pd.read_csv('dataset/VNQ.csv')  # path_to_market_data is commented out above; assuming VNQ.csv sits in the same dataset/ folder as the other files
vnq_df['Date'] = pd.to_datetime(vnq_df['Date'])
vnq_df = vnq_df[['Date','Adj Close']]
vnq_df.columns = ['Date', 'VNQ_Close']
print(np.min(vnq_df['Date'] ),np.max(vnq_df['Date'] ))
vnq_df = vnq_df.sort_values('Date', ascending=True) # sort in ascending date order
vnq_df.tail()
```
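The 2012 = 100 scale factors above are typed in by hand; a small helper can derive them from the data directly. A sketch, assuming each frame keeps its `Date` column and the value column named as above (`rebase_to_100` is illustrative):
```
def rebase_to_100(df, value_col, base_date):
    # Scale a series so that its value on base_date equals 100.
    base_value = df.loc[df['Date'] == base_date, value_col].iloc[0]
    return df[value_col] * (100.0 / base_value)

# Reproduces the hand-computed S&P 500 adjustment (2012-01-03 is the first
# trading day of 2012 in this dataset).
rebase_to_100(sp500_df_tmp, 'SP500 Close', '2012-01-03').tail()
```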
|
github_jupyter
|
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import io, base64, os, json, re
import pandas as pd
import numpy as np
import datetime
import warnings
warnings.filterwarnings('ignore')
# path_to_market_data = '/dataset/^GSPC.csv'
# create MACD indicator columns
def MACD(df, feature, fast=9, medium=12, long=26):
# WARNING - Feed data in ascending order only (i.e. first row should be your oldest print)
tmp_df = df.copy()
tmp_df['EXP1'] = tmp_df[feature].ewm(span=medium, adjust=False).mean()
tmp_df['EXP2'] = tmp_df[feature].ewm(span=long, adjust=False).mean()
tmp_df['MACD'] = tmp_df['EXP1']-tmp_df['EXP2']
tmp_df['EXP3'] = tmp_df['MACD'].ewm(span=fast, adjust=False).mean()
tmp_df['MACD_Hist'] = tmp_df['MACD'] - tmp_df['EXP3']
return tmp_df
# S&P 500
sp500_df = pd.read_csv('dataset/GSPC.csv')
sp500_df['Date'] = pd.to_datetime(sp500_df['Date'])
sp500_df = sp500_df[['Date','Adj Close']]
sp500_df.columns = ['Date','SP500 Close'] # create 2 columns for our table
print(np.min(sp500_df['Date']), np.max(sp500_df['Date']))
sp500_df = sp500_df.sort_values('Date',ascending=True) # sort by date, ascending
sp500_df.tail()
# Case-Shiller U.S. National Home Price Index (CSUSHPINSA)
caseshill_df = pd.read_csv('dataset/CSUSHPINSA.csv')
caseshill_df['DATE'] = pd.to_datetime(caseshill_df['DATE'])
caseshill_df.columns = ['Date', 'Case-Shiller'] # create 2 columns for our table
caseshill_df = caseshill_df.sort_values('Date')
print('caseshill_df:',np.min(caseshill_df['Date']),np.max(caseshill_df['Date']))
caseshill_df.tail()
# Average Hourly Earnings of Production and Nonsupervisory Employees
avghourly_df = pd.read_csv('dataset/AHETPI.csv')
avghourly_df['DATE'] = pd.to_datetime(avghourly_df['DATE'])
avghourly_df.columns = ['Date', 'AHETPI']
avghourly_df = avghourly_df.sort_values('Date')
print('avghourly_df:',np.min(avghourly_df['Date']),np.max(avghourly_df['Date']))
avghourly_df.tail()
# Consumer Price Index for All Urban Consumers
cpi_df = pd.read_csv('dataset/CUSR0000SEHC.csv')
cpi_df['DATE'] = pd.to_datetime(cpi_df['DATE'])
cpi_df.columns = ['Date', 'CPI']
cpi_df = cpi_df.sort_values('Date')
print('cpi_df:', np.min(cpi_df['Date']), np.max(cpi_df['Date']))
cpi_df.tail()
cut_off_date = '1988-01-01'
sp500_df_tmp = sp500_df.copy() # create copy of df
caseshill_df_tmp = caseshill_df.copy() # create copy of df
avghourly_df_tmp = avghourly_df.copy() # create copy of df
cpi_df_tmp = cpi_df.copy() # create copy of df
sp500_df_tmp = sp500_df_tmp[sp500_df_tmp['Date'] >= cut_off_date]
caseshill_df_tmp = caseshill_df_tmp[caseshill_df_tmp['Date'] >= cut_off_date]
avghourly_df_tmp = avghourly_df_tmp[avghourly_df_tmp['Date'] >= cut_off_date]
cpi_df_tmp = cpi_df_tmp[cpi_df_tmp['Date'] >= cut_off_date]
# plot every series on one date axis, each on its own y-scale
fig, ax = plt.subplots(figsize=(16, 8))
plt.plot(sp500_df_tmp['Date'], sp500_df_tmp['SP500 Close'] , color='blue', label='SP500_Close')
plt.title('SP500, Case-Shiller, AHETPI, CPI')
plt.grid()
# get new axis
ax2 = ax.twinx()
plt.plot(caseshill_df_tmp['Date'], caseshill_df_tmp['Case-Shiller'] , color='red', label='Case-Shiller')
# get new axis
ax3 = ax.twinx()
plt.plot(avghourly_df_tmp['Date'], avghourly_df_tmp['AHETPI'] , color='green', label='AHETPI')
# get new axis
ax4 = ax.twinx()
plt.plot(cpi_df_tmp['Date'], cpi_df_tmp['CPI'] , color='brown', label='CPI')
plt.show()
sp500_df_tmp[sp500_df_tmp['Date'] == '2012-01-03']
print(100/1277.060059)
sp500_df_tmp['SP500 Close-adj'] = sp500_df_tmp['SP500 Close'] * 0.07830485284952445
caseshill_df_tmp[caseshill_df_tmp['Date'] == '2012-01-01']
print(100/134.164)
caseshill_df_tmp['Case-Shiller-adj'] = caseshill_df_tmp['Case-Shiller'] * 0.7453564294445605
avghourly_df[avghourly_df['Date'] == '2012-01-01']
print(100/19.58)
avghourly_df_tmp['AHETPI-adj'] = avghourly_df_tmp['AHETPI'] * 5.107252298263535
cpi_df[cpi_df['Date'] == '2012-01-01']
print(100/262.366)
cpi_df_tmp['CPI-adj'] = cpi_df_tmp['CPI'] * 0.3811469473940983
# plot all rebased series on a single shared axis
fig, ax = plt.subplots(figsize=(16, 8))
plt.plot(sp500_df_tmp['Date'], sp500_df_tmp['SP500 Close-adj'] , color='blue', label='SP500_Close')
plt.title('SP500, Case-Shiller, AHETPI, CPI - Adjusted to 2012=100')
plt.plot(caseshill_df_tmp['Date'], caseshill_df_tmp['Case-Shiller-adj'] , color='red', label='Case-Shiller')
plt.plot(avghourly_df_tmp['Date'], avghourly_df_tmp['AHETPI-adj'] , color='green', label='AHETPI')
plt.plot(cpi_df_tmp['Date'], cpi_df_tmp['CPI-adj'] , color='brown', label='CPI')
plt.grid()
plt.show()
# VNQ
vnq_df = pd.read_csv('dataset/VNQ.csv')  # path_to_market_data is commented out above; assuming VNQ.csv sits in the same dataset/ folder as the other files
vnq_df['Date'] = pd.to_datetime(vnq_df['Date'])
vnq_df = vnq_df[['Date','Adj Close']]
vnq_df.columns = ['Date', 'VNQ_Close']
print(np.min(vnq_df['Date'] ),np.max(vnq_df['Date'] ))
vnq_df = vnq_df.sort_values('Date', ascending=True) # sort in ascending date order
vnq_df.tail()
| 0.213541 | 0.538134 |
<a href="https://colab.research.google.com/github/Ruwai/DS-Sprint-01-Dealing-With-Data/blob/master/DS_Unit_1_Sprint_Challenge_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Data Science Unit 1 Sprint Challenge 1
## Loading, cleaning, visualizing, and analyzing data
In this sprint challenge you will look at a dataset of the survival of patients who underwent surgery for breast cancer.
http://archive.ics.uci.edu/ml/datasets/Haberman%27s+Survival
Data Set Information:
The dataset contains cases from a study that was conducted between 1958 and 1970 at the University of Chicago's Billings Hospital on the survival of patients who had undergone surgery for breast cancer.
Attribute Information:
1. Age of patient at time of operation (numerical)
2. Patient's year of operation (year - 1900, numerical)
3. Number of positive axillary nodes detected (numerical)
4. Survival status (class attribute)
-- 1 = the patient survived 5 years or longer
-- 2 = the patient died within 5 year
Sprint challenges are evaluated based on satisfactory completion of each part. It is suggested you work through it in order, getting each aspect reasonably working, before trying to deeply explore, iterate, or refine any given step. Once you get to the end, if you want to go back and improve things, go for it!
## Part 1 - Load and validate the data
- Load the data as a `pandas` data frame.
- Validate that it has the appropriate number of observations (you can check the raw file, and also read the dataset description from UCI).
- Validate that you have no missing values.
- Add informative names to the features.
- The survival variable is encoded as 1 for surviving >5 years and 2 for not - change this to be 0 for not surviving and 1 for surviving >5 years (0/1 is a more traditional encoding of binary variables)
At the end, print the first five rows of the dataset to demonstrate the above.
```
# TODO
import pandas as pd
import numpy as np
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data', header=None)  # the raw file has no header row, so keep all 306 observations
df.columns = ['age', 'year_of_op', 'nodes_detected', 'survival_status']
df
df.describe()
dir(df)
df.isnull().sum()
df.info()
df.count()
df.head()
```
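The brief asks for the survival variable to be recoded to 0/1, which the cell above does not do (and the later crosstabs still use the original 1/2 coding); a minimal sketch of the mapping would be:
```
# Original coding: 1 = survived 5+ years, 2 = died within 5 years.
# Recode to the conventional 0/1: 1 = survived 5+ years, 0 = did not.
df['survival_status'] = df['survival_status'].map({1: 1, 2: 0})
df.head()
```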
## Part 2 - Examine the distribution and relationships of the features
Explore the data - create at least *2* tables (can be summary statistics or crosstabulations) and *2* plots illustrating the nature of the data.
This is open-ended, so to remind - first *complete* this task as a baseline, then go on to the remaining sections, and *then* as time allows revisit and explore further.
Hint - you may need to bin some variables depending on your chosen tables/plots.
```
# TODO
q = pd.crosstab(df['age'], df['year_of_op'])
q
age_bin = pd.cut(df['age'], 10)
w = pd.crosstab(age_bin, df['survival_status'])
w
year_op_bin = pd.cut(df['year_of_op'], 10)
r = pd.crosstab(year_op_bin, df['survival_status'])
r
### survival_status: 1 == survived 5 years or longer, 2 == died within 5 years
node_bin = pd.cut(df['nodes_detected'], 10)
e = pd.crosstab(node_bin, df['survival_status'])
e
!pip install seaborn==0.9.0
import seaborn as sns
import matplotlib.pyplot as plt  # plt is used for the figures below but was never imported
!pip install matplotlib==1.4.3
import statsmodels
import scipy
!pip freeze | grep numpy
sns.regplot(x='year_of_op', y='nodes_detected', scatter=True,
color='r', data=df)
# nani?
sns.jointplot(x=df['age'], y=df['year_of_op'], kind='hex', color='b')
fig, ax = plt.subplots()
sns.distplot(df['age'])
sns.distplot(df['year_of_op'])
ax.set_xlabel('Age')
plt.title('Distribution of ages')
plt.show()
fig, ax = plt.subplots(figsize=(20,10))
sns.set_color_codes(palette='pastel')
sns.barplot(x='year_of_op', y='nodes_detected', data=df, label='year of operation',
color='r')
sns.set_color_codes(palette='muted')
sns.barplot(x='age', y='nodes_detected', data=df, label='age', color='r')
sns.despine(left=True, bottom=True)
copy_df = df.copy()
copy_df = copy_df.drop(['nodes_detected'], axis=1)
copy_df
#i can't drop it?
# nevermind
corr = copy_df.corr()
corr
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
fig, ax = plt.subplots(figsize=(15,8))
cmap = sns.diverging_palette(220,10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.4, center=0,
square=True, linewidths=.5, cbar_kws={'shrink':.5})
```
## Part 3 - Analysis and Interpretation
Now that you've looked at the data, answer the following questions:
- What is at least one feature that looks to have a positive relationship with survival?
More than half of the patients with fewer than five positive axillary nodes detected survived for five years or longer, so a low node count looks positively related to survival.
- What is at least one feature that looks to have a negative relationship with survival?
From the data we can infer that patients in their mid-40s to late 50s who were operated on before 1960 were more likely not to survive.
- How are those two features related with each other, and what might that mean?
Age, year of operation, and node count are related: roughly 20% of the patients were operated on before 1960, at the start of the study, and most of the patients from that early period who did not survive were between 40 and 60 years old. The correlation heatmap likewise highlights the relationships between year of operation and age, and between survival status and age.
Answer with text, but feel free to intersperse example code/results or refer to it from earlier.
```
```
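The survival claims above can be checked with a normalised crosstab; a short sketch reusing `node_bin` and `df` from Part 2 (proportions are per row):
```
# Share of each node-count bin falling into each survival class.
pd.crosstab(node_bin, df['survival_status'], normalize='index')
```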
|
github_jupyter
|
# TODO
import pandas as pd
import numpy as np
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data', header=None)  # the raw file has no header row, so keep all 306 observations
df.columns = ['age', 'year_of_op', 'nodes_detected', 'survival_status']
df
df.describe()
dir(df)
df.isnull().sum()
df.info()
df.count()
df.head()
# TODO
q = pd.crosstab(df['age'], df['year_of_op'])
q
age_bin = pd.cut(df['age'], 10)
w = pd.crosstab(age_bin, df['survival_status'])
w
year_op_bin = pd.cut(df['year_of_op'], 10)
r = pd.crosstab(year_op_bin, df['survival_status'])
r
### survival_status: 1 == survived 5 years or longer, 2 == died within 5 years
node_bin = pd.cut(df['nodes_detected'], 10)
e = pd.crosstab(node_bin, df['survival_status'])
e
!pip install seaborn==0.9.0
import seaborn as sns
import matplotlib.pyplot as plt  # plt is used for the figures below but was never imported
!pip install matplotlib==1.4.3
import statsmodels
import scipy
!pip freeze | grep numpy
sns.regplot(x='year_of_op', y='nodes_detected', scatter=True,
color='r', data=df)
# nani?
sns.jointplot(x=df['age'], y=df['year_of_op'], kind='hex', color='b')
fig, ax = plt.subplots()
sns.distplot(df['age'])
sns.distplot(df['year_of_op'])
ax.set_xlabel('Age')
plt.title('Distribution of ages')
plt.show()
fig, ax = plt.subplots(figsize=(20,10))
sns.set_color_codes(palette='pastel')
sns.barplot(x='year_of_op', y='nodes_detected', data=df, label='year of operation',
color='r')
sns.set_color_codes(palette='muted')
sns.barplot(x='age', y='nodes_detected', data=df, label='age', color='r')
sns.despine(left=True, bottom=True)
copy_df = df.copy()
copy_df = copy_df.drop(['nodes_detected'], axis=1)
copy_df
#i can't drop it?
# nevermind
corr = copy_df.corr()
corr
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
fig, ax = plt.subplots(figsize=(15,8))
cmap = sns.diverging_palette(220,10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.4, center=0,
square=True, linewidths=.5, cbar_kws={'shrink':.5})
| 0.208743 | 0.985691 |
<a href="https://colab.research.google.com/github/zhangguanheng66/examples/blob/master/Welcome_To_Colaboratory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<p><img alt="Colaboratory logo" height="45px" src="/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px"></p>
<h1>What is Colaboratory?</h1>
Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with
- Zero configuration required
- Free access to GPUs
- Easy sharing
Whether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below!
```
import torchtext
import torch
print(torch.__file__)
print(torch.__version__)
print(torchtext.__file__)
print(torchtext.__version__)
%%shell
pip install numpy
pip install --pre torch torchtext -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
import torchtext
import torch
print(torch.__file__)
print(torch.__version__)
print(torchtext.__file__)
print(torchtext.__version__)
```
## **Getting started**
The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.
For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
```
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
```
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.
Variables that you define in one cell can later be used in other cells:
```
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
```
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.com#create=true).
Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org).
## Data science
With Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.
```
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
```
You can import your own data into Colab notebooks from your Google Drive account, including from spreadsheets, as well as from Github and many other sources. To learn more about importing data, and how Colab can be used for data science, see the links below under [Working with Data](#working-with-data).
## Machine learning
With Colab you can import an image dataset, train an image classifier on it, and evaluate the model, all in just [a few lines of code](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb). Colab notebooks execute code on Google's cloud servers, meaning you can leverage the power of Google hardware, including [GPUs and TPUs](#using-accelerated-hardware), regardless of the power of your machine. All you need is a browser.
Colab is used extensively in the machine learning community with applications including:
- Getting started with TensorFlow
- Developing and training neural networks
- Experimenting with TPUs
- Disseminating AI research
- Creating tutorials
To see sample Colab notebooks that demonstrate machine learning applications, see the [machine learning examples](#machine-learning-examples) below.
## More Resources
### Working with Notebooks in Colab
- [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb)
- [Guide to Markdown](/notebooks/markdown_guide.ipynb)
- [Importing libraries and installing dependencies](/notebooks/snippets/importing_libraries.ipynb)
- [Saving and loading notebooks in GitHub](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
- [Interactive forms](/notebooks/forms.ipynb)
- [Interactive widgets](/notebooks/widgets.ipynb)
- <img src="/img/new.png" height="20px" align="left" hspace="4px" alt="New"></img>
[TensorFlow 2 in Colab](/notebooks/tensorflow_version.ipynb)
<a name="working-with-data"></a>
### Working with Data
- [Loading data: Drive, Sheets, and Google Cloud Storage](/notebooks/io.ipynb)
- [Charts: visualizing data](/notebooks/charts.ipynb)
- [Getting started with BigQuery](/notebooks/bigquery.ipynb)
### Machine Learning Crash Course
These are a few of the notebooks from Google's online Machine Learning course. See the [full course website](https://developers.google.com/machine-learning/crash-course/) for more.
- [Intro to Pandas](/notebooks/mlcc/intro_to_pandas.ipynb)
- [Tensorflow concepts](/notebooks/mlcc/tensorflow_programming_concepts.ipynb)
- [First steps with TensorFlow](/notebooks/mlcc/first_steps_with_tensor_flow.ipynb)
- [Intro to neural nets](/notebooks/mlcc/intro_to_neural_nets.ipynb)
- [Intro to sparse data and embeddings](/notebooks/mlcc/intro_to_sparse_data_and_embeddings.ipynb)
<a name="using-accelerated-hardware"></a>
### Using Accelerated Hardware
- [TensorFlow with GPUs](/notebooks/gpu.ipynb)
- [TensorFlow with TPUs](/notebooks/tpu.ipynb)
<a name="machine-learning-examples"></a>
## Machine Learning Examples
To see end-to-end examples of the interactive machine learning analyses that Colaboratory makes possible, check out these tutorials using models from [TensorFlow Hub](https://tfhub.dev).
A few featured examples:
- [Retraining an Image Classifier](https://tensorflow.org/hub/tutorials/tf2_image_retraining): Build a Keras model on top of a pre-trained image classifier to distinguish flowers.
- [Text Classification](https://tensorflow.org/hub/tutorials/tf2_text_classification): Classify IMDB movie reviews as either *positive* or *negative*.
- [Style Transfer](https://tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization): Use deep learning to transfer style between images.
- [Multilingual Universal Sentence Encoder Q&A](https://tensorflow.org/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa): Use a machine learning model to answer questions from the SQuAD dataset.
- [Video Interpolation](https://tensorflow.org/hub/tutorials/tweening_conv3d): Predict what happened in a video between the first and the last frame.
|
github_jupyter
|
import torchtext
import torch
print(torch.__file__)
print(torch.__version__)
print(torchtext.__file__)
print(torchtext.__version__)
%%shell
pip install numpy
pip install --pre torch torchtext -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
import torchtext
import torch
print(torch.__file__)
print(torch.__version__)
print(torchtext.__file__)
print(torchtext.__version__)
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
| 0.589007 | 0.980618 |
```
import pandas as pd
import numpy as np
import seaborn as sns
import glob, os
from IPython.display import Image
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR, SVR
from sklearn import metrics
from sklearn.linear_model import LinearRegression,Ridge
import statsmodels.api as sm
from sklearn import linear_model
from sklearn.tree import DecisionTreeRegressor
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import RFE
from sklearn import tree
import pydotplus
from sklearn.externals.six import StringIO
import matplotlib.pyplot as plt
%matplotlib inline
from collections import Counter
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_regression
from sklearn.feature_selection import f_regression
from sklearn.linear_model import ElasticNet
import forward_selection as fs
from scipy.stats import chi2_contingency, pearsonr, spearmanr
dft11 = pd.read_csv("la_base.csv", encoding="latin1")
dft11["taux_bgauche"] = dft11['taux_xgauche']+ dft11['taux_gauche'] + dft11['taux_vert']
dft11["taux_bdroite"] = dft11['taux_droite']+ dft11['taux_centre']
# Above-average vote-share indicators for the incumbent
# Popularity of the incumbent
dft11["ecart_pop"] = dft11["pop_president"] - dft11["pop_premier_ministre"]
dft11["pop_exec"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1981,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==1988,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1988,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==2012,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2012,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==1995,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1995,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==2002,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2002,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==2007,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2007,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"pop_xdroite"] = dft11.ix[dft11["Ann?e"]==1988,"pop_xdroite"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"pop_verts"] = dft11.ix[dft11["Ann?e"]==1995,"pop_verts"].iloc[0]
dft11.ix[dft11["Ann?e"]==1988,"pop_verts"] = dft11.ix[dft11["Ann?e"]==1995,"pop_verts"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"nombre de logement vacant"] = 1891.
dft11["taux_sortie_sans_bloc"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1981,"taux_centre"]
dft11.ix[dft11["Ann?e"]==1988,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1988,"taux_gauche"]
dft11.ix[dft11["Ann?e"]==1995,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1995,"taux_gauche"]
dft11.ix[dft11["Ann?e"]==2002,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2002,"taux_droite"]
dft11.ix[dft11["Ann?e"]==2007,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2007,"taux_droite"]
dft11.ix[dft11["Ann?e"]==2012,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2012,"taux_droite"]
dft11["taux_sortie_avec_bloc"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1981,"taux_centre"] +\
dft11.ix[dft11["Ann?e"]==1981,"taux_droite"]
dft11.ix[dft11["Ann?e"]==1988,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1988,"taux_gauche"]+\
dft11.ix[dft11["Ann?e"]==1988,"taux_xgauche"] + dft11.ix[dft11["Ann?e"]==1988,"taux_vert"]
dft11.ix[dft11["Ann?e"]==1995,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1995,"taux_gauche"]+\
dft11.ix[dft11["Ann?e"]==1995,"taux_xgauche"] + dft11.ix[dft11["Ann?e"]==1995,"taux_vert"]
dft11.ix[dft11["Ann?e"]==2002,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2002,"taux_droite"]+\
dft11.ix[dft11["Ann?e"]==2002,"taux_centre"]
dft11.ix[dft11["Ann?e"]==2007,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2007,"taux_droite"]+\
dft11.ix[dft11["Ann?e"]==2007,"taux_centre"]
dft11.ix[dft11["Ann?e"]==2012,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2012,"taux_droite"]+\
dft11.ix[dft11["Ann?e"]==2012,"taux_centre"]
def create_train(target, dft11, option_fn=False):
dft11["CORS88"] = dft11.apply(lambda row : 1 if row["Ann?e"]==1988 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft11["CORS02"] = dft11.apply(lambda row : 1 if row["Ann?e"]==2002 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft12 = dft11.drop(["code", 'taux_Blancs et nuls', 'taux_droite', 'taux_xgauche',\
'taux_vert','taux_centre', 'Q1_rate', 'Q2_rate', 'Q3_rate', 'Q4_rate', "Total", \
'depart_OM', "Date","taux_Abstention_sup_moyenne",'taux_Blancs et nuls_sup_moyenne',
'depart_CORSE'], axis=1)
dft12["Date"] = dft12["Ann?e"] - 1981
list_dell = ["Ann?e","d?partement","taux_bgauche",'taux_Abstention',"taux_sortie_sans_bloc",
"taux_sortie_avec_bloc","taux_gauche", 'taux_xdroite',"taux_bdroite"]
if option_fn==True:
dft12 = dft12[dft12["Ann?e"]!=1981]
df_train_x = dft12.drop(list_dell, axis=1)
df_train_y = dft12[target]
return df_train_x, df_train_y
def create_train_and_test(target, dft11, option_fn=False):
dft11["CORS88"] = dft11.apply(lambda row : 1 if row["Ann?e"]==1988 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft11["CORS02"] = dft11.apply(lambda row : 1 if row["Ann?e"]==2002 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft12 = dft11.drop(["code", 'taux_Blancs et nuls', 'taux_droite', 'taux_xgauche',\
'taux_vert','taux_centre', 'Q1_rate', 'Q2_rate', 'Q3_rate', 'Q4_rate', "Total", \
'depart_OM', "Date","taux_Abstention_sup_moyenne",'taux_Blancs et nuls_sup_moyenne',
'depart_CORSE'], axis=1)
dft12["Date"] = dft12["Ann?e"] - 1981
list_dell = ["Ann?e","d?partement","taux_bgauche",'taux_Abstention',"taux_sortie_sans_bloc",
"taux_sortie_avec_bloc","taux_gauche", 'taux_xdroite',"taux_bdroite"]
df_train = dft12[dft12["Ann?e"]!=2012]
if option_fn==True:
df_train = df_train[df_train["Ann?e"]!=1981]
df_test = dft12[dft12["Ann?e"]==2012]
df_train_x = df_train.drop(list_dell, axis=1)
df_train_y = df_train[target]
df_test_x = df_test.drop(list_dell, axis=1)
df_test_y = df_test[target]
return df_train_x, df_train_y, df_test_x, df_test_y
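# NOTE (added): r2_adjusted() is called below but was not defined in this notebook.
# The helper here is an assumed stand-in using the standard adjusted-R^2 formula,
# where p is the number of predictors used in the regression.
def r2_adjusted(X, y, p, model):
    n = X.shape[0]
    r2 = model.score(X, y)
    return 1 - (1 - r2) * (n - 1) / (n - p - 1)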
features_keep = ["Nombre total de mariages domicili?s",
"taux_droite_sup_moyenne",
"taux_gauche_sup_moyenne",
"taux_xdroite_sup_moyenne",
"pop_xdroite",
"Densit?",
"subventions",
"pop_exec"
]
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11, False)
df_train_x_select_features = df_train_x.ix[:, features_keep]
df_test_x_select_features = df_test_x.ix[:, features_keep]
lr = LinearRegression(fit_intercept=True, normalize=True)
lr.fit(df_train_x_select_features, df_train_y)
print(df_test_x_select_features.columns)
print("Résultat sans 2012 (avec test)")
print("R2 adjust train %s" % r2_adjusted(df_train_x_select_features, df_train_y, 8,lr))
print("R2 train %s" % lr.score(df_train_x_select_features, df_train_y))
print("MAE train %s" % metrics.mean_absolute_error(df_train_y, lr.predict(df_train_x_select_features)))
print("MAE test %s" % metrics.mean_absolute_error(df_test_y, lr.predict(df_test_x_select_features)))
print("\n")
print("Résultat avec fit entire data")
df_train_x, df_train_y = create_train("taux_xdroite", dft11, False)
df_train_x_select_features = df_train_x.ix[:, features_keep]
lr = LinearRegression(fit_intercept=True, normalize=True)
lr.fit(df_train_x_select_features, df_train_y)
print("R2 train %s" % lr.score(df_train_x_select_features, df_train_y))
print("R2 adjust train %s" % r2_adjusted(df_train_x_select_features, df_train_y, 8,lr))
print("MAE train %s" % metrics.mean_absolute_error(df_train_y, lr.predict(df_train_x_select_features)))
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import seaborn as sns
import glob, os
from IPython.display import Image
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR, SVR
from sklearn import metrics
from sklearn.linear_model import LinearRegression,Ridge
import statsmodels.api as sm
from sklearn import linear_model
from sklearn.tree import DecisionTreeRegressor
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import RFE
from sklearn import tree
import pydotplus
from sklearn.externals.six import StringIO
import matplotlib.pyplot as plt
%matplotlib inline
from collections import Counter
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_regression
from sklearn.feature_selection import f_regression
from sklearn.linear_model import ElasticNet
import forward_selection as fs
from scipy.stats import chi2_contingency, pearsonr, spearmanr
dft11 = pd.read_csv("la_base.csv", encoding="latin1")
dft11["taux_bgauche"] = dft11['taux_xgauche']+ dft11['taux_gauche'] + dft11['taux_vert']
dft11["taux_bdroite"] = dft11['taux_droite']+ dft11['taux_centre']
#Taux_supp_moyenne sortant
#Popularité sortant
dft11["ecart_pop"] = dft11["pop_president"] - dft11["pop_premier_ministre"]
dft11["pop_exec"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1981,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==1988,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1988,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==2012,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2012,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==1995,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1995,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==2002,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2002,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==2007,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2007,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"pop_xdroite"] = dft11.ix[dft11["Ann?e"]==1988,"pop_xdroite"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"pop_verts"] = dft11.ix[dft11["Ann?e"]==1995,"pop_verts"].iloc[0]
dft11.ix[dft11["Ann?e"]==1988,"pop_verts"] = dft11.ix[dft11["Ann?e"]==1995,"pop_verts"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"nombre de logement vacant"] = 1891.
dft11["taux_sortie_sans_bloc"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1981,"taux_centre"]
dft11.ix[dft11["Ann?e"]==1988,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1988,"taux_gauche"]
dft11.ix[dft11["Ann?e"]==1995,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1995,"taux_gauche"]
dft11.ix[dft11["Ann?e"]==2002,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2002,"taux_droite"]
dft11.ix[dft11["Ann?e"]==2007,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2007,"taux_droite"]
dft11.ix[dft11["Ann?e"]==2012,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2012,"taux_droite"]
dft11["taux_sortie_avec_bloc"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1981,"taux_centre"] +\
dft11.ix[dft11["Ann?e"]==1981,"taux_droite"]
dft11.ix[dft11["Ann?e"]==1988,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1988,"taux_gauche"]+\
dft11.ix[dft11["Ann?e"]==1988,"taux_xgauche"] + dft11.ix[dft11["Ann?e"]==1988,"taux_vert"]
dft11.ix[dft11["Ann?e"]==1995,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1995,"taux_gauche"]+\
dft11.ix[dft11["Ann?e"]==1995,"taux_xgauche"] + dft11.ix[dft11["Ann?e"]==1995,"taux_vert"]
dft11.ix[dft11["Ann?e"]==2002,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2002,"taux_droite"]+\
dft11.ix[dft11["Ann?e"]==2002,"taux_centre"]
dft11.ix[dft11["Ann?e"]==2007,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2007,"taux_droite"]+\
dft11.ix[dft11["Ann?e"]==2007,"taux_centre"]
dft11.ix[dft11["Ann?e"]==2012,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2012,"taux_droite"]+\
dft11.ix[dft11["Ann?e"]==2012,"taux_centre"]
def create_train(target, dft11, option_fn=False):
dft11["CORS88"] = dft11.apply(lambda row : 1 if row["Ann?e"]==1988 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft11["CORS02"] = dft11.apply(lambda row : 1 if row["Ann?e"]==2002 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft12 = dft11.drop(["code", 'taux_Blancs et nuls', 'taux_droite', 'taux_xgauche',\
'taux_vert','taux_centre', 'Q1_rate', 'Q2_rate', 'Q3_rate', 'Q4_rate', "Total", \
'depart_OM', "Date","taux_Abstention_sup_moyenne",'taux_Blancs et nuls_sup_moyenne',
'depart_CORSE'], axis=1)
dft12["Date"] = dft12["Ann?e"] - 1981
list_dell = ["Ann?e","d?partement","taux_bgauche",'taux_Abstention',"taux_sortie_sans_bloc",
"taux_sortie_avec_bloc","taux_gauche", 'taux_xdroite',"taux_bdroite"]
if option_fn==True:
dft12 = dft12[dft12["Ann?e"]!=1981]
df_train_x = dft12.drop(list_dell, axis=1)
df_train_y = dft12[target]
return df_train_x, df_train_y
def create_train_and_test(target, dft11, option_fn=False):
dft11["CORS88"] = dft11.apply(lambda row : 1 if row["Ann?e"]==1988 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft11["CORS02"] = dft11.apply(lambda row : 1 if row["Ann?e"]==2002 and
row["code"] in ["2A","2B"] else 0, axis=1)
dft12 = dft11.drop(["code", 'taux_Blancs et nuls', 'taux_droite', 'taux_xgauche',\
'taux_vert','taux_centre', 'Q1_rate', 'Q2_rate', 'Q3_rate', 'Q4_rate', "Total", \
'depart_OM', "Date","taux_Abstention_sup_moyenne",'taux_Blancs et nuls_sup_moyenne',
'depart_CORSE'], axis=1)
dft12["Date"] = dft12["Ann?e"] - 1981
list_dell = ["Ann?e","d?partement","taux_bgauche",'taux_Abstention',"taux_sortie_sans_bloc",
"taux_sortie_avec_bloc","taux_gauche", 'taux_xdroite',"taux_bdroite"]
df_train = dft12[dft12["Ann?e"]!=2012]
if option_fn==True:
df_train = df_train[df_train["Ann?e"]!=1981]
df_test = dft12[dft12["Ann?e"]==2012]
df_train_x = df_train.drop(list_dell, axis=1)
df_train_y = df_train[target]
df_test_x = df_test.drop(list_dell, axis=1)
df_test_y = df_test[target]
return df_train_x, df_train_y, df_test_x, df_test_y
features_keep = ["Nombre total de mariages domicili?s",
"taux_droite_sup_moyenne",
"taux_gauche_sup_moyenne",
"taux_xdroite_sup_moyenne",
"pop_xdroite",
"Densit?",
"subventions",
"pop_exec"
]
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11, False)
df_train_x_select_features = df_train_x.ix[:, features_keep]
df_test_x_select_features = df_test_x.ix[:, features_keep]
lr = LinearRegression(fit_intercept=True, normalize=True)
lr.fit(df_train_x_select_features, df_train_y)
print(df_test_x_select_features.columns)
print("Résultat sans 2012 (avec test)")
print("R2 adjust train %s" % r2_adjusted(df_train_x_select_features, df_train_y, 8,lr))
print("R2 train %s" % lr.score(df_train_x_select_features, df_train_y))
print("MAE train %s" % metrics.mean_absolute_error(df_train_y, lr.predict(df_train_x_select_features)))
print("MAE test %s" % metrics.mean_absolute_error(df_test_y, lr.predict(df_test_x_select_features)))
print("\n")
print("Résultat avec fit entire data")
df_train_x, df_train_y = create_train("taux_xdroite", dft11, False)
df_train_x_select_features = df_train_x.ix[:, features_keep]
lr = LinearRegression(fit_intercept=True, normalize=True)
lr.fit(df_train_x_select_features, df_train_y)
print("R2 train %s" % lr.score(df_train_x_select_features, df_train_y))
print("R2 adjust train %s" % r2_adjusted(df_train_x_select_features, df_train_y, 8,lr))
print("MAE train %s" % metrics.mean_absolute_error(df_train_y, lr.predict(df_train_x_select_features)))
| 0.225502 | 0.212355 |
Initialization of ES
```
from elasticsearch import Elasticsearch
client = Elasticsearch(['elasticsearch'])
indice = "syslog-*"
```
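Before running the queries below, it can help to confirm the client can actually reach the cluster. This check is not part of the original notebook; it only uses the standard `ping()`/`info()` client methods:
```
# Optional sanity check (added): verify the cluster is reachable before querying
if client.ping():
    print(client.info())  # prints cluster name, version, and other metadata
else:
    print("Could not reach the Elasticsearch cluster")
```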
1. Query All
```
query = {
"query": {
"match_all": {}
}
}
res = client.search(index=indice, body=query, scroll='100m', size=10000)
print("Got %d Hits:" % res['hits']['total']['value'])
sid = res['_scroll_id']
scroll_size = len(res['hits']['hits'])
while scroll_size > 0:
"Scrolling..."
print(scroll_size)
data = client.scroll(scroll_id=sid, scroll='2m')
# Update the scroll ID
sid = data['_scroll_id']
# Get the number of results that returned in the last scroll
scroll_size = len(data['hits']['hits'])
```
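As an alternative to managing the scroll ID by hand, the `elasticsearch.helpers.scan` helper wraps the same scroll API. A minimal sketch (not part of the original notebook), assuming the same `client` and `indice` as above:
```
from elasticsearch.helpers import scan

# scan() lazily yields every matching document and handles scrolling internally
hits = scan(client, index=indice, query={"query": {"match_all": {}}})
total = sum(1 for _ in hits)
print("Scanned %d documents" % total)
```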
2. Match Query
```
query = {
"query": {
"match": {
"hostname":"for.org"
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
3. Multi Match
```
query = {
"query": {
"multi_match": {
"query": "up.com ahmadajmi",
"fields":["hostname", "application"]
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
4. String Query
```
query = {
"query": {
"query_string": {
"query": "(for.org) AND (pretty breath) "
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
5. Term Query
```
query = {
"query":{
"term":{"message":"pretty"}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
6. Range Query
```
query = {
"query":{
"range":{
"version":{
"gte":2
}
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
7. Exists Query
```
query = {
"query": {
"exists": {
"field": "application"
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
8. Regex Query
https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html
```
query = {
"query": {
"regexp": {
"hostname": {
"value": "up.*",
"flags": "ALL",
"max_determinized_states": 10000,
"rewrite": "constant_score"
}
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
9. Compound Query https://www.elastic.co/guide/en/elasticsearch/reference/current/compound-queries.html
```
query = {
"query": {
"bool" : {
"must" : {
"term" : { "hostname" : "random.net" }
},
"should": {
"term" : { "application" : "ahmadajmi" }
},
"minimum_should_match" : 1,
"boost" : 1.0
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
```
10. Count aggregation
```
query = {
"aggs":{
"version_count":{
"value_count":{
"field":"version"
}
}
}
}
res = client.search(index=indice, body=query )
print("Got %d Hits:" % res['hits']['total']['value'])
res['aggregations']
```
11. Cardinality aggregation
```
query = {
"aggs": {
"my-agg-name": {
"cardinality": {
"field": "priority"
}
}
}
}
res = client.search(index=indice, body=query, scroll='100m', size=10000)
print("Got %d Hits:" % res['hits']['total']['value'])
print(res['aggregations'])
```
|
github_jupyter
|
from elasticsearch import Elasticsearch
client = Elasticsearch(['elasticsearch'])
indice = "syslog-*"
query = {
"query": {
"match_all": {}
}
}
res = client.search(index=indice, body=query, scroll='100m', size=10000)
print("Got %d Hits:" % res['hits']['total']['value'])
sid = res['_scroll_id']
scroll_size = len(res['hits']['hits'])
while scroll_size > 0:
"Scrolling..."
print(scroll_size)
data = client.scroll(scroll_id=sid, scroll='2m')
# Update the scroll ID
sid = data['_scroll_id']
# Get the number of results that returned in the last scroll
scroll_size = len(data['hits']['hits'])
query = {
"query": {
"match": {
"hostname":"for.org"
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query": {
"multi_match": {
"query": "up.com ahmadajmi",
"fields":["hostname", "application"]
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query": {
"query_string": {
"query": "(for.org) AND (pretty breath) "
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query":{
"term":{"message":"pretty"}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query":{
"range":{
"version":{
"gte":2
}
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query": {
"exists": {
"field": "application"
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query": {
"regexp": {
"hostname": {
"value": "up.*",
"flags": "ALL",
"max_determinized_states": 10000,
"rewrite": "constant_score"
}
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"query": {
"bool" : {
"must" : {
"term" : { "hostname" : "random.net" }
},
"should": {
"term" : { "application" : "ahmadajmi" }
},
"minimum_should_match" : 1,
"boost" : 1.0
}
}
}
res = client.search(index=indice, body=query)
print("Got %d Hits:" % res['hits']['total']['value'])
for hit in res['hits']['hits']:
print("%(timestamp)s %(raw)s" % hit["_source"])
query = {
"aggs":{
"version_count":{
"value_count":{
"field":"version"
}
}
}
}
res = client.search(index=indice, body=query )
print("Got %d Hits:" % res['hits']['total']['value'])
res['aggregations']
query = {
"aggs": {
"my-agg-name": {
"cardinality": {
"field": "priority"
}
}
}
}
res = client.search(index=indice, body=query, scroll='100m', size=10000)
print("Got %d Hits:" % res['hits']['total']['value'])
print(res['aggregations'])
| 0.276691 | 0.697036 |
---
_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
---
# Classifier Visualization Playground
The purpose of this notebook is to let you visualize various classifiers' decision boundaries.
The data used in this notebook is based on the [UCI Mushroom Data Set](http://archive.ics.uci.edu/ml/datasets/Mushroom?ref=datanews.io) stored in `mushrooms.csv`.
In order to better visualize the decision boundaries, we'll perform Principal Component Analysis (PCA) on the data to reduce the dimensionality to 2 dimensions. Dimensionality reduction will be covered in a later module of this course.
Play around with different models and parameters to see how they affect the classifier's decision boundary and accuracy!
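Before trusting the 2-D picture, it can also be useful to check how much variance the first two principal components actually retain. The sketch below is illustrative only and uses a random binary matrix as a stand-in for the one-hot mushroom features built in the next cell:
```
import numpy as np
from sklearn.decomposition import PCA

X_demo = np.random.randint(0, 2, size=(200, 20))  # stand-in for one-hot encoded features
pca_demo = PCA(n_components=2).fit(X_demo)
print(pca_demo.explained_variance_ratio_)        # variance fraction per component
print(pca_demo.explained_variance_ratio_.sum())  # total variance kept in two dimensions
```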
```
%matplotlib notebook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
df = pd.read_csv('readonly/mushrooms.csv')
df2 = pd.get_dummies(df)
df3 = df2.sample(frac=0.08)
X = df3.iloc[:,2:]
y = df3.iloc[:,1]
pca = PCA(n_components=2).fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(pca, y, random_state=0)
plt.figure(dpi=120)
plt.scatter(pca[y.values==0,0], pca[y.values==0,1], alpha=0.5, label='Edible', s=2)
plt.scatter(pca[y.values==1,0], pca[y.values==1,1], alpha=0.5, label='Poisonous', s=2)
plt.legend()
plt.title('Mushroom Data Set\nFirst Two Principal Components')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.gca().set_aspect('equal')
def plot_mushroom_boundary(X, y, fitted_model):
plt.figure(figsize=(9.8,5), dpi=100)
for i, plot_type in enumerate(['Decision Boundary', 'Decision Probabilities']):
plt.subplot(1,2,i+1)
mesh_step_size = 0.01 # step size in the mesh
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size), np.arange(y_min, y_max, mesh_step_size))
if i == 0:
Z = fitted_model.predict(np.c_[xx.ravel(), yy.ravel()])
else:
try:
Z = fitted_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
except:
plt.text(0.4, 0.5, 'Probabilities Unavailable', horizontalalignment='center',
verticalalignment='center', transform = plt.gca().transAxes, fontsize=12)
plt.axis('off')
break
Z = Z.reshape(xx.shape)
plt.scatter(X[y.values==0,0], X[y.values==0,1], alpha=0.4, label='Edible', s=5)
        plt.scatter(X[y.values==1,0], X[y.values==1,1], alpha=0.4, label='Poisonous', s=5)
plt.imshow(Z, interpolation='nearest', cmap='RdYlBu_r', alpha=0.15,
extent=(x_min, x_max, y_min, y_max), origin='lower')
plt.title(plot_type + '\n' +
str(fitted_model).split('(')[0]+ ' Test Accuracy: ' + str(np.round(fitted_model.score(X, y), 5)))
plt.gca().set_aspect('equal');
plt.tight_layout()
plt.subplots_adjust(top=0.9, bottom=0.08, wspace=0.02)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(max_depth=3)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.svm import SVC
model = SVC(kernel='linear')
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=1)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=10)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
```
|
github_jupyter
|
%matplotlib notebook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
df = pd.read_csv('readonly/mushrooms.csv')
df2 = pd.get_dummies(df)
df3 = df2.sample(frac=0.08)
X = df3.iloc[:,2:]
y = df3.iloc[:,1]
pca = PCA(n_components=2).fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(pca, y, random_state=0)
plt.figure(dpi=120)
plt.scatter(pca[y.values==0,0], pca[y.values==0,1], alpha=0.5, label='Edible', s=2)
plt.scatter(pca[y.values==1,0], pca[y.values==1,1], alpha=0.5, label='Poisonous', s=2)
plt.legend()
plt.title('Mushroom Data Set\nFirst Two Principal Components')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.gca().set_aspect('equal')
def plot_mushroom_boundary(X, y, fitted_model):
plt.figure(figsize=(9.8,5), dpi=100)
for i, plot_type in enumerate(['Decision Boundary', 'Decision Probabilities']):
plt.subplot(1,2,i+1)
mesh_step_size = 0.01 # step size in the mesh
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size), np.arange(y_min, y_max, mesh_step_size))
if i == 0:
Z = fitted_model.predict(np.c_[xx.ravel(), yy.ravel()])
else:
try:
Z = fitted_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
except:
plt.text(0.4, 0.5, 'Probabilities Unavailable', horizontalalignment='center',
verticalalignment='center', transform = plt.gca().transAxes, fontsize=12)
plt.axis('off')
break
Z = Z.reshape(xx.shape)
plt.scatter(X[y.values==0,0], X[y.values==0,1], alpha=0.4, label='Edible', s=5)
        plt.scatter(X[y.values==1,0], X[y.values==1,1], alpha=0.4, label='Poisonous', s=5)
plt.imshow(Z, interpolation='nearest', cmap='RdYlBu_r', alpha=0.15,
extent=(x_min, x_max, y_min, y_max), origin='lower')
plt.title(plot_type + '\n' +
str(fitted_model).split('(')[0]+ ' Test Accuracy: ' + str(np.round(fitted_model.score(X, y), 5)))
plt.gca().set_aspect('equal');
plt.tight_layout()
plt.subplots_adjust(top=0.9, bottom=0.08, wspace=0.02)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(max_depth=3)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.svm import SVC
model = SVC(kernel='linear')
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=1)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=10)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
| 0.531696 | 0.928214 |
# Compute summary maps
### Compute ranges and cutoffs
```
bettoskjr3 = np.linspace(bettoskj_cp_tot_mean.min(),bettoskj_cp_tot_mean.max(),4)
bettoskj_r3co1 = bettoskjr3[1]
bettoskj_r3co2 = bettoskjr3[2]
skjr4 = np.linspace(skj_cp_tot_mean.min(),skj_cp_tot_mean.max(),5)
skj_r4co1 = skjr4[1]
skj_r4co2 = skjr4[2]
skj_r4co3 = skjr4[3]
betr4 = np.linspace(bet_cp_tot_mean.min(),bet_cp_tot_mean.max(),5)
bet_r4co1 = betr4[1]
bet_r4co2 = betr4[2]
bet_r4co3 = betr4[3]
```
### Define a function to compute summary maps
```
def compute_summary_map(skj_cp_tot_now, bet_cp_tot_now, bettoskj_cp_tot_now):
summary_map_now = bettoskj_cp_tot_now.copy()
# - Low SKJ CPUE, low BET CPUE (LS-LB)
summary_map_now = xr.where((skj_cp_tot_now<=skj_r4co2)
& (bet_cp_tot_now<=bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 0.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<=skj_r4co2)
& (bet_cp_tot_now<=bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 0.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<=skj_r4co2)
& (bet_cp_tot_now<=bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 1, summary_map_now)
# - Low SKJ CPUE, high BET CPUE (LS-HB)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co2)
& (bet_cp_tot_now>bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 1.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co2)
& (bet_cp_tot_now>bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 1.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co2)
& (bet_cp_tot_now>bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 2, summary_map_now)
# - High SKJ CPUE, low BET CPUE (HS-LB)
summary_map_now = xr.where((skj_cp_tot_now>skj_r4co2)
& (bet_cp_tot_now<bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 2.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>skj_r4co2)
& (bet_cp_tot_now<bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 2.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>skj_r4co2)
& (bet_cp_tot_now<bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 3, summary_map_now)
# - High SKJ CPUE, high BET CPUE (HS-HB)
summary_map_now = xr.where((skj_cp_tot_now>=skj_r4co2)
& (bet_cp_tot_now>=bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 4.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>=skj_r4co2)
& (bet_cp_tot_now>=bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 4.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>=skj_r4co2)
& (bet_cp_tot_now>=bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 5, summary_map_now)
# - Medium SKJ CPUE, medium BET CPUE (MS-MB)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co3)
& (skj_cp_tot_now>skj_r4co1)
& (bet_cp_tot_now<bet_r4co3)
& (bet_cp_tot_now>bet_r4co1)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 3.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co3)
& (skj_cp_tot_now>skj_r4co1)
& (bet_cp_tot_now<bet_r4co3)
& (bet_cp_tot_now>bet_r4co1)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 3.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co3)
& (skj_cp_tot_now>skj_r4co1)
& (bet_cp_tot_now<bet_r4co3)
& (bet_cp_tot_now>bet_r4co1)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 4, summary_map_now)
summary_map_now = xr.where(summary_map_now<0.5,0,summary_map_now)
return summary_map_now
```
### Compute mean summary maps
```
summary_map_mean = compute_summary_map(skj_cp_tot_mean, bet_cp_tot_mean, bettoskj_cp_tot_mean)
```
### Compute ENSO summary maps
```
summary_map_en = compute_summary_map(skj_cp_tot_en, bet_cp_tot_en, bettoskj_cp_tot_en)
summary_map_ln = compute_summary_map(skj_cp_tot_ln, bet_cp_tot_ln, bettoskj_cp_tot_ln)
```
### Compute seasonal summary maps
```
summary_map_win = compute_summary_map(skj_cp_tot_seas.sel(season='DJF'),
bet_cp_tot_seas.sel(season='DJF'),
bettoskj_cp_tot_seas.sel(season='DJF'))
summary_map_spr = compute_summary_map(skj_cp_tot_seas.sel(season='MAM'),
bet_cp_tot_seas.sel(season='MAM'),
bettoskj_cp_tot_seas.sel(season='MAM'))
summary_map_sum = compute_summary_map(skj_cp_tot_seas.sel(season='JJA'),
bet_cp_tot_seas.sel(season='JJA'),
bettoskj_cp_tot_seas.sel(season='JJA'))
summary_map_aut = compute_summary_map(skj_cp_tot_seas.sel(season='SON'),
bet_cp_tot_seas.sel(season='SON'),
bettoskj_cp_tot_seas.sel(season='SON'))
```
|
github_jupyter
|
bettoskjr3 = np.linspace(bettoskj_cp_tot_mean.min(),bettoskj_cp_tot_mean.max(),4)
bettoskj_r3co1 = bettoskjr3[1]
bettoskj_r3co2 = bettoskjr3[2]
skjr4 = np.linspace(skj_cp_tot_mean.min(),skj_cp_tot_mean.max(),5)
skj_r4co1 = skjr4[1]
skj_r4co2 = skjr4[2]
skj_r4co3 = skjr4[3]
betr4 = np.linspace(bet_cp_tot_mean.min(),bet_cp_tot_mean.max(),5)
bet_r4co1 = betr4[1]
bet_r4co2 = betr4[2]
bet_r4co3 = betr4[3]
def compute_summary_map(skj_cp_tot_now, bet_cp_tot_now, bettoskj_cp_tot_now):
summary_map_now = bettoskj_cp_tot_now.copy()
# - Low SKJ CPUE, low BET CPUE (LS-LB)
summary_map_now = xr.where((skj_cp_tot_now<=skj_r4co2)
& (bet_cp_tot_now<=bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 0.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<=skj_r4co2)
& (bet_cp_tot_now<=bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 0.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<=skj_r4co2)
& (bet_cp_tot_now<=bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 1, summary_map_now)
# - Low SKJ CPUE, high BET CPUE (LS-HB)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co2)
& (bet_cp_tot_now>bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 1.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co2)
& (bet_cp_tot_now>bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 1.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co2)
& (bet_cp_tot_now>bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 2, summary_map_now)
# - High SKJ CPUE, low BET CPUE (HS-LB)
summary_map_now = xr.where((skj_cp_tot_now>skj_r4co2)
& (bet_cp_tot_now<bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 2.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>skj_r4co2)
& (bet_cp_tot_now<bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 2.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>skj_r4co2)
& (bet_cp_tot_now<bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 3, summary_map_now)
# - High SKJ CPUE, high BET CPUE (HS-HB)
summary_map_now = xr.where((skj_cp_tot_now>=skj_r4co2)
& (bet_cp_tot_now>=bet_r4co2)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 4.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>=skj_r4co2)
& (bet_cp_tot_now>=bet_r4co2)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 4.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now>=skj_r4co2)
& (bet_cp_tot_now>=bet_r4co2)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 5, summary_map_now)
# - Medium SKJ CPUE, medium BET CPUE (MS-MB)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co3)
& (skj_cp_tot_now>skj_r4co1)
& (bet_cp_tot_now<bet_r4co3)
& (bet_cp_tot_now>bet_r4co1)
& (bettoskj_cp_tot_now>=bettoskj_r3co2)
, 3.5, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co3)
& (skj_cp_tot_now>skj_r4co1)
& (bet_cp_tot_now<bet_r4co3)
& (bet_cp_tot_now>bet_r4co1)
& (bettoskj_cp_tot_now>bettoskj_r3co1)
& (bettoskj_cp_tot_now<bettoskj_r3co2)
, 3.75, summary_map_now)
summary_map_now = xr.where((skj_cp_tot_now<skj_r4co3)
& (skj_cp_tot_now>skj_r4co1)
& (bet_cp_tot_now<bet_r4co3)
& (bet_cp_tot_now>bet_r4co1)
& (bettoskj_cp_tot_now<=bettoskj_r3co1)
, 4, summary_map_now)
summary_map_now = xr.where(summary_map_now<0.5,0,summary_map_now)
return summary_map_now
summary_map_mean = compute_summary_map(skj_cp_tot_mean, bet_cp_tot_mean, bettoskj_cp_tot_mean)
summary_map_en = compute_summary_map(skj_cp_tot_en, bet_cp_tot_en, bettoskj_cp_tot_en)
summary_map_ln = compute_summary_map(skj_cp_tot_ln, bet_cp_tot_ln, bettoskj_cp_tot_ln)
summary_map_win = compute_summary_map(skj_cp_tot_seas.sel(season='DJF'),
bet_cp_tot_seas.sel(season='DJF'),
bettoskj_cp_tot_seas.sel(season='DJF'))
summary_map_spr = compute_summary_map(skj_cp_tot_seas.sel(season='MAM'),
bet_cp_tot_seas.sel(season='MAM'),
bettoskj_cp_tot_seas.sel(season='MAM'))
summary_map_sum = compute_summary_map(skj_cp_tot_seas.sel(season='JJA'),
bet_cp_tot_seas.sel(season='JJA'),
bettoskj_cp_tot_seas.sel(season='JJA'))
summary_map_aut = compute_summary_map(skj_cp_tot_seas.sel(season='SON'),
bet_cp_tot_seas.sel(season='SON'),
bettoskj_cp_tot_seas.sel(season='SON'))
| 0.192767 | 0.810441 |
# Thomson Scattering: Spectral Density
[thomson]: ../../diagnostics/thomson.rst
[spectral-density]: ../../api/plasmapy.diagnostics.thomson.spectral_density.rst#spectral-density
[sheffield]: https://www.sciencedirect.com/book/9780123748775/plasma-scattering-of-electromagnetic-radiation
The [thomson.spectral_density][spectral-density] function calculates the [spectral density function S(k,w)][sheffield], which is one of several terms that determine the scattered power spectrum for the Thomson scattering of a probe laser beam by a plasma. In particular, this function calculates $S(k,w)$ for the case of a plasma consisting of one or more ion species and electron populations under the assumption that all of the ion species and the electron fluid have Maxwellian velocity distribution functions and that the combined plasma is quasi-neutral. In this regime, the spectral density is given by the equation:
\begin{equation}
S(k,\omega) = \sum_e \frac{2\pi}{k} \bigg |1 - \frac{\chi_e}{\epsilon} \bigg |^2 f_{e0,e}\bigg ( \frac{\omega}{k} \bigg ) + \sum_i \frac{2\pi Z_i}{k} \bigg | \frac{\chi_e}{\epsilon} \bigg |^2 f_{i0, i} \bigg ( \frac{\omega}{k} \bigg )
\end{equation}
where $\chi_e$ is the electron component susceptibility of the plasma and $\epsilon = 1 + \sum_e \chi_e + \sum_i \chi_i$ is the total plasma dielectric function (with $\chi_i$ being the ion component of the susceptibility), $Z_i$ is the charge of each ion, $k$ is the scattering wavenumber, $\omega$ is the scattering frequency, and the functions $f_{e0,e}$ and $f_{i0,i}$ are the Maxwellian velocity distributions for the electrons and ion species respectively.
Thomson scattering can be either non-collective (the scattered spectrum is a linear sum of the light scattered by individual particles) or collective (the scattered spectrum is dominated by scattering off of collective plasma waves). The [thomson.spectral_density][spectral-density] function can be used in both cases. These regimes are delineated by the dimensionless constant $\alpha$:
\begin{equation}
\alpha = \frac{1}{k \lambda_{De}}
\end{equation}
where $\lambda_{De}$ is the Debye length. $\alpha > 1$ corresponds to collective scattering, while $\alpha < 1$ corresponds to non-collective scattering. Depending on which of these regimes applies, fitting the scattered spectrum can provide the electron (and sometimes ion) density and temperature. Doppler shifting of the spectrum can also provide a measurement of the drift velocity of each plasma species.
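As a rough back-of-the-envelope check (not part of the original notebook), $\alpha$ can be estimated directly from the probe geometry and plasma conditions by computing the Debye length and the scattering wavenumber $k \approx 2 k_{probe} \sin(\theta/2)$; the numbers below are placeholders matching the example later in this notebook:
```
import numpy as np
from scipy.constants import epsilon_0, e

# Placeholder plasma and probe parameters (SI units)
n_e = 2e23              # electron density in m^-3 (2e17 cm^-3)
T_e_eV = 12             # electron temperature in eV
wavelength = 532e-9     # probe wavelength in m
theta = np.deg2rad(63)  # scattering angle

# Debye length: sqrt(eps0 * kB*Te / (n_e * e^2)), with kB*Te = T_e_eV * e in joules
lambda_De = np.sqrt(epsilon_0 * T_e_eV * e / (n_e * e**2))
# Scattering wavenumber: k ~ 2 * (2*pi / lambda_probe) * sin(theta / 2)
k_s = 2 * (2 * np.pi / wavelength) * np.sin(theta / 2)
alpha = 1 / (k_s * lambda_De)
print("lambda_De = {:.2e} m, alpha = {:.2f}".format(lambda_De, alpha))
```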
For a detailed explanation of the underlying physics (and derivations of these expressions), see ["Plasma Scattering of Electromagnetic Radiation" by Sheffield et al.][sheffield]
```
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from plasmapy.diagnostics import thomson
```
Construct the parameters that define the Thomson diagnostic setup: the probe beam and the scattering-collection geometry. These parameters will be used for all of the examples below.
```
# The probe wavelength can in theory be anything, but in practice integer frequency multiples of the Nd:YAG wavelength
# 1064 nm are used (532 corresponds to a frequency-doubled probe beam from such a laser).
probe_wavelength = 532 * u.nm
# Array of wavelengths over which to calculate the spectral distribution
wavelengths = (
np.arange(probe_wavelength.value - 60, probe_wavelength.value + 60, 0.01) * u.nm
)
# The scattering geometry is defined by unit vectors for the orientation of the probe laser beam (probe_n) and
# the path from the scattering volume (where the measurement is made) to the detector (scatter_n).
# These can be set up for any experimental geometry.
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(63)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
```
In order to calculate the scattered spectrum, we must also include some information about the plasma. For this plot we'll allow the ``fract``, ``ion_species``, ``fluid_vel``, and ``ion_vel`` keywords to keep their default values, describing a single-species H+ plasma at rest in the laboratory frame.
```
ne = 2e17 * u.cm ** -3
Te = 12 * u.eV
Ti = 10 * u.eV
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
ne,
Te,
Ti,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
fig, ax = plt.subplots()
ax.plot(wavelengths, Skw, lw=2)
ax.set_xlim(probe_wavelength.value - 10, probe_wavelength.value + 10)
ax.set_ylim(0, 1e-13)
ax.set_xlabel("$\lambda$ (nm)")
ax.set_ylabel("S(k,w)")
ax.set_title("Thomson Scattering Spectral Density")
```
### Example Cases in Different Scattering Regimes
We will now consider several example cases in different scattering regimes. In order to facilitate this, we'll set up each example as a dictionary of plasma parameters:
A single-species, stationary hydrogen plasma with a density and temperature that result in a scattering spectrum dominated by scattering off of single electrons.
```
non_collective = {
"name": "Non-Collective Regime",
"n": 5e15 * u.cm ** -3,
"Te": 40 * u.eV,
"Ti": np.array([10]) * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
}
```
A single-species, stationary hydrogen plasma with a density and temperature that result in weakly collective scattering (scattering parameter $\alpha$ approaching 1).
```
weakly_collective = {
"name": "Weakly Collective Regime",
"n": 2e17 * u.cm ** -3,
"Te": 20 * u.eV,
"Ti": 10 * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
}
```
A single-species, stationary hydrogen plasma with a density and temperature that result in a spectrum dominated by multi-particle scattering, including scattering off of ions.
```
collective = {
"name": "Collective Regime",
"n": 5e17 * u.cm ** -3,
"Te": 10 * u.eV,
"Ti": 4 * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
}
```
A case identical to the collective example above, except that now the electron fluid has a substantial drift velocity parallel to the probe laser and the ions have a drift (relative to the electrons) at an angle.
```
drifts = {
"name": "Drift Velocities",
"n": 5e17 * u.cm ** -3,
"Te": 10 * u.eV,
"Ti": 10 * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[700, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[-600, -100, 0]]) * u.km / u.s,
}
```
A case identical to the collective example, except that now the plasma consists of 25% He+ and 75% C5+ ions, and two electron populations exist with different temperatures.
```
two_species = {
"name": "Two Ion and Electron Components",
"n": 5e17 * u.cm ** -3,
"Te": np.array([50, 10]) * u.eV,
"Ti": np.array([10, 50]) * u.eV,
"efract": np.array([0.5, 0.5]),
"ifract": np.array([0.25, 0.75]),
"ion_species": ["He-4 1+", "C-12 5+"],
"electron_vel": np.array([[0, 0, 0], [0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0], [0, 0, 0]]) * u.km / u.s,
}
examples = [non_collective, weakly_collective, collective, drifts, two_species]
```
For each example, plot the spectral distribution function over a large range to show the broad electron scattering feature (top row) and over a narrow range around the probe wavelength to show the ion scattering feature (bottom row).
```
fig, ax = plt.subplots(ncols=len(examples), nrows=2, figsize=[25, 10])
fig.subplots_adjust(wspace=0.4, hspace=0.4)
lbls = "abcdefg"
for i, x in enumerate(examples):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
x["n"],
x["Te"],
x["Ti"],
ifract=x.get("ifract"),
efract=x.get("efract"),
ion_species=x["ion_species"],
electron_vel=x["electron_vel"],
ion_vel=x["ion_vel"],
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
ax[0][i].axvline(x=probe_wavelength.value, color="red") # Mark the probe wavelength
ax[0][i].plot(wavelengths, Skw)
ax[0][i].set_xlim(probe_wavelength.value - 15, probe_wavelength.value + 15)
ax[0][i].set_ylim(0, 1e-13)
ax[0][i].set_xlabel("$\lambda$ (nm)")
ax[0][i].set_title(lbls[i] + ") " + x["name"] + "\n$\\alpha$={:.4f}".format(alpha))
ax[1][i].axvline(x=probe_wavelength.value, color="red") # Mark the probe wavelength
ax[1][i].plot(wavelengths, Skw)
ax[1][i].set_xlim(probe_wavelength.value - 1, probe_wavelength.value + 1)
ax[1][i].set_ylim(0, 1.1 * np.max(Skw.value))
ax[1][i].set_xlabel("$\lambda$ (nm)")
```
Plots of the spectral density function (Skw), which determines the amount of light scattered into different wavelengths:
a. In the non-collective regime only the electron feature is visible.
b. In the weakly collective regime ($\alpha$ approaching 1) an ion feature starts to appear and the electron feature is distorted.
c. In the collective regime both features split into two peaks, corresponding to scattering off of forward and backwards propagating plasma oscillations.
d. The introduction of drift velocities introduces several Doppler shifts in the calculations, resulting in a shifted spectrum.
e. Including multiple ion and electron populations modifies the ion and electron features respectively.
|
github_jupyter
|
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from plasmapy.diagnostics import thomson
# The probe wavelength can in theory be anything, but in practice integer frequency multiples of the Nd:YAG wavelength
# 1064 nm are used (532 corresponds to a frequency-doubled probe beam from such a laser).
probe_wavelength = 532 * u.nm
# Array of wavelengths over which to calcualte the spectral distribution
wavelengths = (
np.arange(probe_wavelength.value - 60, probe_wavelength.value + 60, 0.01) * u.nm
)
# The scattering geometry is defined by unit vectors for the orientation of the probe laser beam (probe_n) and
# the path from the scattering volume (where the measurement is made) to the detector (scatter_n).
# These can be setup for any experimental geometry.
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(63)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
ne = 2e17 * u.cm ** -3
Te = 12 * u.eV
Ti = 10 * u.eV
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
ne,
Te,
Ti,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
fig, ax = plt.subplots()
ax.plot(wavelengths, Skw, lw=2)
ax.set_xlim(probe_wavelength.value - 10, probe_wavelength.value + 10)
ax.set_ylim(0, 1e-13)
ax.set_xlabel("$\lambda$ (nm)")
ax.set_ylabel("S(k,w)")
ax.set_title("Thomson Scattering Spectral Density")
non_collective = {
"name": "Non-Collective Regime",
"n": 5e15 * u.cm ** -3,
"Te": 40 * u.eV,
"Ti": np.array([10]) * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
}
weakly_collective = {
"name": "Weakly Collective Regime",
"n": 2e17 * u.cm ** -3,
"Te": 20 * u.eV,
"Ti": 10 * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
}
collective = {
"name": "Collective Regime",
"n": 5e17 * u.cm ** -3,
"Te": 10 * u.eV,
"Ti": 4 * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
}
drifts = {
"name": "Drift Velocities",
"n": 5e17 * u.cm ** -3,
"Te": 10 * u.eV,
"Ti": 10 * u.eV,
"ion_species": ["H+"],
"electron_vel": np.array([[700, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[-600, -100, 0]]) * u.km / u.s,
}
two_species = {
"name": "Two Ion and Electron Components",
"n": 5e17 * u.cm ** -3,
"Te": np.array([50, 10]) * u.eV,
"Ti": np.array([10, 50]) * u.eV,
"efract": np.array([0.5, 0.5]),
"ifract": np.array([0.25, 0.75]),
"ion_species": ["He-4 1+", "C-12 5+"],
"electron_vel": np.array([[0, 0, 0], [0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0], [0, 0, 0]]) * u.km / u.s,
}
examples = [non_collective, weakly_collective, collective, drifts, two_species]
fig, ax = plt.subplots(ncols=len(examples), nrows=2, figsize=[25, 10])
fig.subplots_adjust(wspace=0.4, hspace=0.4)
lbls = "abcdefg"
for i, x in enumerate(examples):
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
x["n"],
x["Te"],
x["Ti"],
ifract=x.get("ifract"),
efract=x.get("efract"),
ion_species=x["ion_species"],
electron_vel=x["electron_vel"],
ion_vel=x["ion_vel"],
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
ax[0][i].axvline(x=probe_wavelength.value, color="red") # Mark the probe wavelength
ax[0][i].plot(wavelengths, Skw)
ax[0][i].set_xlim(probe_wavelength.value - 15, probe_wavelength.value + 15)
ax[0][i].set_ylim(0, 1e-13)
ax[0][i].set_xlabel("$\lambda$ (nm)")
ax[0][i].set_title(lbls[i] + ") " + x["name"] + "\n$\\alpha$={:.4f}".format(alpha))
ax[1][i].axvline(x=probe_wavelength.value, color="red") # Mark the probe wavelength
ax[1][i].plot(wavelengths, Skw)
ax[1][i].set_xlim(probe_wavelength.value - 1, probe_wavelength.value + 1)
ax[1][i].set_ylim(0, 1.1 * np.max(Skw.value))
ax[1][i].set_xlabel("$\lambda$ (nm)")
| 0.713332 | 0.984063 |
```
RunningInCOLAB = 'google.colab' in str(get_ipython())
if RunningInCOLAB:
  !pip install scipy matplotlib
```
# Information Theory
[Information theory](https://en.wikipedia.org/wiki/Information_theory) is the scientific study of the quantification, storage, and communication of digital information.
It is a subfield of mathematics used extensively in computing ranging from signal processing, ML/AI, digital compression, and more. For more information please check out this [article](https://machinelearningmastery.com/what-is-information-entropy/), upon which this tutorial is derived.
```
%matplotlib inline
from math import log2
import matplotlib.pyplot as plt
```
## Information
Information is a measurement of "surprise". Rare events (low probability) are more surprising when they occur, thus they have high information. Common events (high probability) are less surprising when they occur, thus they have low information. Therefore we can calculate the information in an event from the probability of the event occurring. It follows the equation:
$$
\text{information}(x) = h(x) = -\log_2 (p(x)) \quad \text{where } p(x) \text{ is the probability of event x occurring}
$$
Note: the logarithm is base 2, which means information is measured in bits and can be interpreted as the number of bits required to represent the event.
Also note: the negative sign ensures the result lies in $[0, +\infty)$.
```
probability = 0.5 # Probability of the event
h = -log2(probability)
print("p(x) = {}, information = {} bits".format(probability, h))
# list of probabilities
probs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# calculate information
info = [-log2(p) for p in probs]
# plot probability vs information
plt.plot(probs, info, marker='.')
plt.title('Probability vs Information')
plt.xlabel('Probability')
plt.ylabel('Information')
plt.show()
```
As can be seen, as the probability increases, the information gained decreases (but not linearly since we're using log!)
Also note that we can use other log bases when calculating information and entropy; this simply changes the units. For instance, if $\ln(x)$ were used, the units would change from bits to nats, as shown below.
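A quick sketch of the same information value in both units (1 nat equals $1/\ln 2 \approx 1.443$ bits):
```
from math import log, log2

p = 0.5
print(-log2(p), "bits")                                # information using log base 2
print(-log(p), "nats")                                 # information using the natural log
print(-log(p) / log(2), "bits (converted from nats)")  # divide by ln(2) to get bits
```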
## Entropy
Entropy is the expected information for a random variable X with a provided probability distribution p. It is denoted $H(X)$ and is simply the information averaged over the probability distribution of events for the variable.
For a random variable with $K$ discrete states $k$, the equation for entropy is:
$$
Entropy(X) = H(X) = -\sum_{k \in K} p(k) \log_2 p(k)
$$
```
n = 6 # the number of events
p = 1.0 /n # probability of one event
e = -sum([p * log2(p) for _ in range(n)]) # calculate entropy
print('Hand Calculated Entropy: {:.4} bits'.format(e))
from scipy.stats import entropy
p = [1/6, 1/6, 1/6, 1/6, 1/6, 1/6]
e = entropy(p, base=2) # Use scipy's entropy to verify
print('Scipy Entropy: {:.4} bits'.format(e))
# calculate entropy by hand (note: this redefines `entropy` and shadows scipy.stats.entropy imported above)
def entropy(events, ets=1e-15):
    # ets is a small epsilon that avoids taking log2(0) when a probability is exactly zero
    return -sum([p * log2(p + ets) for p in events])
# define probabilities
probs = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
# create probability distribution
dists = [[p, 1.0 - p] for p in probs]
# calculate entropy for each distribution
ents = [entropy(d) for d in dists]
# plot probability distribution vs entropy
plt.plot(probs, ents, marker='.')
plt.title('Probability Distribution vs Entropy')
plt.xticks(probs, [str(d) for d in dists])
plt.xlabel('Probability Distribution')
plt.ylabel('Entropy (bits)')
plt.show()
```
Note how the more balanced the probability distribution, the higher the entropy. This again relates back to the idea of surprise: the more skewed a distribution (one where a single event dominates), the less surprising its outcomes are on average, while the more balanced a distribution, the more surprising a typical outcome is.
Entropy is the basis for calculating the differences between distributions using metrics such as KL-Divergence and Cross-Entropy.
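As a brief illustration of that connection, the sketch below computes cross-entropy and KL divergence in bits for two small discrete distributions; the distributions `p` and `q` are made up purely for the example.
```
from math import log2

# two example distributions over the same three events (values chosen for illustration)
p = [0.10, 0.40, 0.50]
q = [0.80, 0.15, 0.05]
# cross-entropy H(P, Q) = -sum p(x) * log2(q(x))
cross_entropy = -sum(px * log2(qx) for px, qx in zip(p, q))
# KL divergence KL(P || Q) = H(P, Q) - H(P)
entropy_p = -sum(px * log2(px) for px in p)
kl_divergence = cross_entropy - entropy_p
print('H(P)       = {:.3f} bits'.format(entropy_p))
print('H(P, Q)    = {:.3f} bits'.format(cross_entropy))
print('KL(P || Q) = {:.3f} bits'.format(kl_divergence))
```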
|
github_jupyter
|
| 0.764628 | 0.983247 |
```
# Dependencies and Setup
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')
# File to Load (Remember to Change These)
mouse_drug_data_to_load = "data/mouse_drug_data.csv"
clinical_trial_data_to_load = "data/clinicaltrial_data.csv"
# Read the Mouse and Drug Data and the Clinical Trial Data
mouse_df = pd.read_csv(mouse_drug_data_to_load)
mouse_df.head()
ct_df = pd.read_csv(clinical_trial_data_to_load)
ct_df.head()
# Combine the data into a single dataset
combined_df = pd.merge(ct_df, mouse_df, how="inner", on="Mouse ID")
combined_df.head()
```
## Tumor Response to Treatment
```
combined_df.groupby("Drug")["Timepoint"].count()
tumor_response = combined_df.groupby(["Drug", "Timepoint"])
tumor_df = tumor_response["Tumor Volume (mm3)"].mean().to_frame()
tumor_df.head()
tumor_response_error = combined_df.groupby(["Drug","Timepoint"])["Tumor Volume (mm3)"].sem()
tre_df = tumor_response_error.to_frame()
tre_df.head()
# Minor Data Munging to Re-Format the Data Frames
tumor_plot_pre = tumor_df.unstack(0)
tumor_plot_df = tumor_plot_pre["Tumor Volume (mm3)"]
# Preview that Reformatting worked
tumor_plot_df.head()
x_axis = [0,5,10,15,20,25,30,35,40,45]
x_limit = 45
plt.figure(figsize=(10,7))
error = tre_df["Tumor Volume (mm3)"]["Capomulin"]
cap = plt.errorbar(x_axis, tumor_plot_df["Capomulin"], yerr=error, fmt="o", ls="dashed", linewidth=1, alpha=1, capsize=3,color ="red")
error = tre_df["Tumor Volume (mm3)"]["Infubinol"]
infu = plt.errorbar(x_axis, tumor_plot_df["Infubinol"], yerr=error, fmt="^", ls="dashed", linewidth=1, alpha=1, capsize=3,color ="blue")
error = tre_df["Tumor Volume (mm3)"]["Ketapril"]
keta = plt.errorbar(x_axis, tumor_plot_df["Ketapril"], yerr=error, fmt="s", ls="dashed", linewidth=1, alpha=1, capsize=3,color = "green")
error = tre_df["Tumor Volume (mm3)"]["Placebo"]
plac = plt.errorbar(x_axis, tumor_plot_df["Placebo"], yerr=error, fmt="D", ls="dashed", linewidth=1, alpha=1, capsize=3,color = "black")
plt.ylim(20, 80)
plt.xlim(0, 45)
plt.title("Tumor Response to Treatment", fontsize=20)
plt.xlabel("Time (Days)", fontsize=14)
plt.ylabel("Tumor Volume (mm3)", fontsize=14)
plt.grid(linestyle="dashed")
plt.legend((cap, infu, keta, plac), ("Capomulin", "Infubinol", "Ketapril", "Placebo"), fontsize=12)
# Save the Figure
plt.savefig('tumor_response.png')
# Show the Figure
plt.show()
```
## Metastatic Response to Treatment
```
# Store the Mean Met. Site Data Grouped by Drug and Timepoint
combined_df.groupby("Drug")["Timepoint"].count()
meta_response = combined_df.groupby(["Drug", "Timepoint"])
# Convert to DataFrame
meta_df = meta_response["Metastatic Sites"].mean().to_frame()
# Preview DataFrame
meta_df.head()
# Store the Standard Error associated with Met. Sites Grouped by Drug and Timepoint
meta_response_error = combined_df.groupby(["Drug","Timepoint"])["Metastatic Sites"].sem()
# Convert to DataFrame
mre_df = meta_response_error.to_frame()
# Preview DataFrame
mre_df.head()
# Minor Data Munging to Re-Format the Data Frames
meta_plot_pre = meta_df.unstack(0)
meta_plot_df = meta_plot_pre["Metastatic Sites"]
# Preview that Reformatting worked
meta_plot_df.head()
# This errorbar plot shows how the number of metastatic sites changes over time for each treatment.
plt.errorbar(np.arange(0, 50, 5), meta_df.loc["Capomulin", "Metastatic Sites"],
yerr = mre_df.loc["Capomulin", "Metastatic Sites"], marker= 'o', color='r', capsize = 2.5, linestyle = '--', label = "Capomulin")
plt.errorbar(np.arange(0, 50, 5), meta_df.loc["Ketapril", "Metastatic Sites"],
yerr = mre_df.loc["Ketapril", "Metastatic Sites"], marker= '^', color='b', capsize = 2.5, linestyle = '--', label = "Ketapril")
plt.errorbar(np.arange(0, 50, 5), meta_df.loc["Placebo", "Metastatic Sites"],
yerr = mre_df.loc["Placebo", "Metastatic Sites"], marker ='s', color='g', capsize = 2.5, linestyle = '--', label = "Placebo")
plt.errorbar(np.arange(0, 50, 5), meta_df.loc["Infubinol", "Metastatic Sites"],
yerr = mre_df.loc["Infubinol", "Metastatic Sites"], marker= '*', color='k', linestyle = '--', capsize = 2.5, label = "Infubinol")
# Add legend
plt.legend(loc="best")
# Add gridlines
plt.grid(alpha = 0.5)
# Add labels
plt.title('Metastatic Response to Treatment')
plt.xlabel('Time (days)')
plt.ylabel('Metastatic Sites')
# Add x limits and y limits
plt.xlim(0,49)
plt.ylim(0,3.9)
# Save the Figure
plt.savefig('meta_response.png')
# Show the Figure
plt.show()
```
## Survival Rates
```
# Store the Count of Mice Grouped by Drug and Timepoint (We can pass any metric)
pre_mice = combined_df.groupby(["Drug", "Timepoint"])["Mouse ID"].nunique()
# Convert to DataFrame
mice = pre_mice.to_frame()
mice = mice.rename(columns={"Mouse ID": "Mouse Count"})
# Preview DataFrame
mice.head()
# Minor Data Munging to Re-Format the Data Frames
mice_plot = mice.unstack(0)
mice_df = mice_plot["Mouse Count"]
# Preview the Data Frame
mice_df.head()
# Generate the Plot (Accounting for percentages)
plt.figure(figsize=(10,7))
plt.plot(np.arange(0, 50, 5), 100*mice.loc["Capomulin", "Mouse Count"]/25,
         marker = 'o', color='r', label= "Capomulin", linestyle ='--')
plt.plot(np.arange(0, 50, 5), 100*mice.loc["Ketapril", "Mouse Count"]/25,
         marker = 's', color='g', label= "Ketapril", linestyle ='--')
plt.plot(np.arange(0, 50, 5), 100*mice.loc["Placebo", "Mouse Count"]/25,
         marker = '^', color='k', label= "Placebo", linestyle ='--')
plt.plot(np.arange(0, 50, 5), 100*mice.loc["Infubinol", "Mouse Count"]/25,
         marker = '*', color='b', label= "Infubinol", linestyle ='--')
plt.ylim(35, 104)
plt.xlim(0, 49)
plt.title("Survival During Treatment", fontsize=20)
plt.xlabel("Time (Days)", fontsize=14)
plt.ylabel("Survival Rate (%)", fontsize=14)
plt.grid(linestyle="dashed")
plt.legend(loc="lower left")
# Save the Figure
plt.savefig('survival_response.png')
# Show the Figure
plt.show()
```
## Summary Bar Graph
```
# The tumor volume starts at 45 mm3 at timepoint 0, so dividing the change by 45 gives the percent change from baseline
capchange = ((tumor_plot_df["Capomulin"][45] - tumor_plot_df["Capomulin"][0])/45)*100
ceftchange = ((tumor_plot_df["Ceftamin"][45] - tumor_plot_df["Ceftamin"][0])/45)*100
infuchange = ((tumor_plot_df["Infubinol"][45] - tumor_plot_df["Infubinol"][0])/45)*100
ketachange = ((tumor_plot_df["Ketapril"][45] - tumor_plot_df["Ketapril"][0])/45)*100
naftchange = ((tumor_plot_df["Naftisol"][45] - tumor_plot_df["Naftisol"][0])/45)*100
placchange = ((tumor_plot_df["Placebo"][45] - tumor_plot_df["Placebo"][0])/45)*100
propchange = ((tumor_plot_df["Propriva"][45] - tumor_plot_df["Propriva"][0])/45)*100
ramichange = ((tumor_plot_df["Ramicane"][45] - tumor_plot_df["Ramicane"][0])/45)*100
stelchange = ((tumor_plot_df["Stelasyn"][45] - tumor_plot_df["Stelasyn"][0])/45)*100
zonichange = ((tumor_plot_df["Zoniferol"][45] - tumor_plot_df["Zoniferol"][0])/45)*100
drug_change_df = pd.DataFrame({"Drug": ["Capomulin","Ceftamin","Infubinol", "Ketapril", "Naftisol", "Placebo", "Propriva", "Ramicane", "Stelasyn", "Zoniferol"],
"Percent Change": [capchange, ceftchange, infuchange, ketachange, naftchange, placchange, propchange, ramichange, stelchange, zonichange]
})
drug_change_df
# Store all Relevant Percent Changes into a Tuple
plt.figure(figsize=(8,5))
rects1 = plt.bar(0, drug_change_df["Percent Change"][0], color='g', alpha=1, align="edge", ec="black", width=1)
rects2 = plt.bar(1, drug_change_df["Percent Change"][1], color='r', alpha=1, align="edge", ec="black", width=1)
rects3 = plt.bar(2, drug_change_df["Percent Change"][2], color='r', alpha=1, align="edge", ec="black", width=1)
rects4 = plt.bar(3, drug_change_df["Percent Change"][3], color='r', alpha=1, align="edge", ec="black", width=1)
# Center a tick under each of the four plotted bars
tick_locations = [value + 0.5 for value in range(4)]
plt.grid(linestyle="dashed")
plt.xticks(tick_locations, drug_change_df["Drug"][:4])
plt.xlim(0, 4)
plt.ylim(-30, 70)
plt.title("Tumor Change Over 45 Day Treatment", fontsize=20)
plt.ylabel("% Tumor Volume Change")
# Place the percentage label inside the downward bar (negative % change)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width()/2., -8,
'%d' % int(height) + "%",
ha='center', va='bottom', color='white', fontsize=14)
autolabel(rects1)
# Redefine autolabel to place labels near the base of the upward bars (positive % change)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width()/2., 2,
'%d' % int(height) + "%",
ha='center', va='bottom', color='white', fontsize=14)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
# Save the Figure
plt.savefig('summary_bar.png')
# Show the Figure
plt.show()
```
|
github_jupyter
|
| 0.606615 | 0.79854 |
```
import json, pickle
import sys; sys.path.insert(0, "..")
import editdistance
import numpy as np
import matplotlib.pyplot as plt
from program_synthesis.datasets.dataset import KarelTorchDataset
from program_synthesis.datasets.karel.mutation import KarelIncorrectExampleMutator, KarelExampleMutator
%matplotlib inline
nearai_data = KarelTorchDataset(
"../data/karel/val.pkl",
incorrect_mutator=KarelIncorrectExampleMutator.from_path(
"../baseline/nearai-val.json", False))
mutations3_data = KarelTorchDataset(
"../data/karel/val.pkl",
mutator=KarelExampleMutator(np.array([0, 0, 1]), True, False)
)
mutations3_distances = [
editdistance.eval(datum.code_sequence, datum.ref_example.code_sequence)
for datum in mutations3_data
]
nearai_distances = [
editdistance.eval(datum.code_sequence, datum.ref_example.code_sequence)
for datum in nearai_data
]
plt.hist(mutations3_distances, label='3 mutations', alpha=0.5)
plt.hist(nearai_distances, label='real outputs: nearai', alpha=0.5)
plt.xlabel("Edit Distance")
plt.ylabel("Frequency")
plt.legend()
plt.show()
def plot_distance_graphs(data, outcomes, title):
    # Collect edit distances from the reference (ref) program to the gold program and to the model's
    # top guess, split by whether the guess passed all 6 test cases.
    correct_distances = []
correct_distances_to_guess = []
correct_distances_gold_to_guess = []
incorrect_distances = []
for datum, outcome in zip(data, outcomes):
        guess = outcome['code']['info']['candidates'][0]  # top-ranked candidate for this example
dist_to_gold = editdistance.eval(datum.code_sequence, datum.ref_example.code_sequence)
dist_to_guess = editdistance.eval(guess, datum.ref_example.code_sequence)
dist_gold_to_guess = editdistance.eval(guess, datum.code_sequence)
is_correct = outcome['stats']['correct'] == 6
if is_correct:
correct_distances.append(dist_to_gold)
correct_distances_to_guess.append(dist_to_guess)
correct_distances_gold_to_guess.append(dist_gold_to_guess)
else:
incorrect_distances.append(dist_to_gold)
plt.hist(correct_distances, alpha=0.5, label='correct')
plt.hist(incorrect_distances, alpha=0.5, label='incorrect')
plt.xlabel("d(ref, gold)")
plt.ylabel("frequency")
plt.legend()
plt.title(title)
plt.show()
plt.scatter(correct_distances, correct_distances_to_guess, alpha=0.1)
plt.plot([0, 50], [0, 50])
plt.ylim(0, 50)
plt.xlim(0, 50)
plt.xlabel("d(ref, gold) [mean={:.2f}]".format(np.mean(correct_distances)))
plt.ylabel("d(ref, correct guess) [mean={:.2f}]".format(np.mean(correct_distances_to_guess)))
plt.title("Among Correct examples")
plt.show()
plt.scatter(correct_distances_gold_to_guess, correct_distances_to_guess, alpha=0.1)
plt.plot([0, 50], [0, 50])
plt.ylim(0, 50)
plt.xlim(0, 50)
plt.xlabel("d(gold, correct guess) [mean={:.2f}]".format(np.mean(correct_distances_gold_to_guess)))
plt.ylabel("d(ref, correct guess) [mean={:.2f}]".format(np.mean(correct_distances_to_guess)))
plt.title("Among Correct examples")
plt.show()
outcome_path = "../logdirs/vanilla,trace_enc==none,batch_size==64,lr==1,lr_decay_steps=100000/report-dev-mnearai,,best_first,,1-1769300-real.jsonl"
outcomes = [json.loads(line) for line in open(outcome_path)][1:]
plot_distance_graphs(nearai_data, outcomes, "best first (1 step)")
outcome_path = "../logdirs/vanilla,trace_enc==none,batch_size==64,lr==1,lr_decay_steps=100000/report-dev-mnearai,,best_first,,25-1769300-real.jsonl"
outcomes = [json.loads(line) for line in open(outcome_path)][1:]
plot_distance_graphs(nearai_data, outcomes, "best first (25 steps)")
```
|
github_jupyter
|
| 0.304559 | 0.531149 |
# Experiment #2 - Derived Features
## Overview
The purpose of this experiment is to determine whether derived features improve model performance. As part of the feature engineering process, we added the following categorical features (a sketch of how such features could be derived follows the list):
* `pcontacted_last_campaign` - whether the customer was previously contacted as part of a campaign
* `pcampaign` - whether the customer was part of the previous campaign
* `previous` - the original `previous` attribute converted to a discrete value
* `campaign_gte10` - whether the customer was contacted 10 or more times as part of this campaign
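For reference, derived features of this kind could be computed with a few pandas transformations along the lines sketched below. This is only an illustration: the source column names (`pdays`, `previous`, `campaign`) and the sentinel value for "never contacted" are assumptions based on the common bank-marketing schema, not taken from the project's actual feature-engineering code.
```
import pandas as pd

def add_derived_features(df: pd.DataFrame) -> pd.DataFrame:
    """Sketch of the derived categorical features (assumed column names)."""
    out = df.copy()
    # assume pdays uses 999 (or -1 in some versions) to mean "never previously contacted"
    out['pcontacted_last_campaign'] = (~out['pdays'].isin([999, -1])).astype(int)
    # whether the customer was contacted at all during the previous campaign
    out['pcampaign'] = (out['previous'] > 0).astype(int)
    # convert the raw `previous` count into a small set of discrete buckets
    out['previous'] = pd.cut(out['previous'], bins=[-1, 0, 1, 2, 1000],
                             labels=['0', '1', '2', '3+'])
    # flag customers contacted 10 or more times in the current campaign
    out['campaign_gte10'] = (out['campaign'] >= 10).astype(int)
    return out
```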
```
%load_ext autoreload
%autoreload 2
from utils import code
import pandas as pd
import numpy as np
import scikitplot as skplt
# Preprocessing
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_selection import SelectKBest, mutual_info_classif, chi2
# Model evaluation
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from support.evaluation import plot_learning_curve, evaluate_model
# Support
from support.model import Model, build_tuned_model
from support.datasets import get_data
from support.experiments import experiment_1, experiment_2, get_scorer
from support import parameters as params
# Algos
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import ExtraTreesClassifier
%matplotlib inline
```
## Data
```
X, y = get_data('../data/train.csv')
X.head()
```
Create a train/test split, train the model on the training portion, and score it on the held-out test set. The performance estimate will be slightly pessimistic since we are training on less data.
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, stratify=y, random_state=1)
```
## ML Models
```
scorer = get_scorer()
```
Apply feature computations
```
pipeline_1 = experiment_1.get_pipeline()
features = pipeline_1.fit_transform(X)
ps = features.shape
print('Instances: {:,}, Features: {}'.format(ps[0], ps[1]))
```
### Original Model
Make sure we still get roughly the same results (~0.67 AUC).
```
# Naive Bayes
param_grid = [{
'nb__alpha': [0, 0.01, 0.1, 1],
'nb__fit_prior': [True, False]
}]
tuned_model = build_tuned_model('nb', BernoulliNB(), X_train, y_train, param_grid, scorer, pipeline=pipeline_1, cv_folds=5)
```
### New Model
How does the performance change with the new features?
```
pipeline_2 = experiment_2.get_pipeline()
features = pipeline_2.fit_transform(X)
ps = features.shape
print('Instances: {:,}, Features: {}'.format(ps[0], ps[1]))
tuned_model = build_tuned_model('nb', BernoulliNB(), X_train, y_train, param_grid, scorer, pipeline=pipeline_2, cv_folds=5)
n_targeted_test = int(params.P_TARGETED * len(X_test))
revenue = params.AVG_REVENUE
cost = params.AVG_COST
probs = tuned_model.model.predict_proba(tuned_model.pipeline.transform(X_test))
preds = tuned_model.model.predict(tuned_model.pipeline.transform(X_test))
# Create a dataframe of probabilities and actual / predicted outcomes
probs_df = pd.DataFrame(np.hstack([probs, y_test.values.reshape(-1,1), preds.reshape(-1,1)]), columns=['p_no', 'p_yes', 'actual', 'predicted'])
# Sort customers by the probability that they will convert
model_targets = probs_df.sort_values('p_yes', ascending=False)
# Take the top 6.6%
model_targets = model_targets.head(n_targeted_test)
# Calculate financial outcomes
model_outcomes = model_targets.actual.apply(lambda x: cost if x == 0 else cost + revenue)
print(classification_report(model_targets.actual, model_targets.predicted))
print('Expected profit: ${:,}'.format(sum(model_outcomes)))
```
The score is lower than the original model's. Is this a high-bias or a high-variance problem?
```
plot_learning_curve(tuned_model.model, 'New Model Learning Curve', experiment_2.get_pipeline().fit_transform(X_train), y_train, scoring=scorer);
```
High bias, and slightly higher variance. Can we improve it with feature selection?
### Feature Selection
```
X.head()
# Create a copy of the data frame with categorical features only
cat_ct = experiment_2.get_categorical_ct()
X_fs = cat_ct.fit_transform(X_train)
features = experiment_2.CATEGORICAL_FEATURES + experiment_2.NEW_CATEGORICAL_FEATURES
X_fs_df = pd.DataFrame(X_fs, columns=features)
ohe_features = experiment_2.get_categorical_pipeline().fit_transform(X_train, y_train)
print('Number of features:', ohe_features.shape[1])
param_grid = [{
'nb_fs__alpha': [0, 0.01, 0.1, 1],
'nb_fs__fit_prior': [True, False],
'kbest__k': np.arange(1, ohe_features.shape[1]+1),
'kbest__score_func': [chi2, mutual_info_classif]
}]
ft_pipeline = Pipeline([
('cat', experiment_2.get_categorical_pipeline()),
('kbest', SelectKBest())
])
tuned_model = build_tuned_model('nb_fs', BernoulliNB(), X_train, y_train, param_grid, scorer, pipeline=ft_pipeline, cv_folds=3)
tuned_model.pipeline.steps[-1]
```
The model achieves maximum performance with 18/66 features. At this point, we could consider adding numeric features and evaluating the performance of more powerful models if stakeholders are not satisfied with a \$56,316 lift over the baseline heuristic-based model and are willing to incur additional costs to improve the model.
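To see which one-hot columns survive the selection, one option is to read the boolean support mask from the `SelectKBest` step; the sketch below assumes `tuned_model.pipeline` is the fitted sklearn `Pipeline` built in the previous cell.
```
# Sketch: inspect the features kept by SelectKBest (assumes the fitted pipeline from above)
kbest = tuned_model.pipeline.named_steps['kbest']
support_mask = kbest.get_support()   # boolean mask over the one-hot encoded columns
print('Selected {} of {} features'.format(support_mask.sum(), support_mask.size))
```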
|
github_jupyter
|
| 0.760917 | 0.946349 |
```
import numpy as np
import matplotlib.pyplot as plt
stepindex_ps=[]; time_ps=[]; ekinc_au=[]; Tion_K=[]; etot_au=[]; enthal_au=[]; econs_au=[]; econt_au=[]; Volume_au=[]
conversion = 27.211
with open("cubic_only_bulkwater.txt", 'r',buffering=100000) as f:
for line in f:
stepindex_ps.append(float(line.split()[0]))
time_ps.append(float(line.split()[1]))
ekinc_au.append(float(line.split()[2]))
Tion_K.append(float(line.split()[4]))
etot_au.append(float(line.split()[5]))
enthal_au.append(float(line.split()[6]))
econs_au.append(float(line.split()[7]))
econt_au.append(float(line.split()[8]))
Volume_au.append(float(line.split()[9]))
#print(econs_au)
#Convert data from atomic unit au to electron volts eV
enthal_eV = np.array(enthal_au) * conversion
#print(enthal_eV)
ekinc_eV = np.array(ekinc_au) * conversion
econs_eV = np.array(econs_au) * conversion
econt_eV = np.array(econt_au) * conversion
#NVT
#Calculates min, max and mean values of the parameters of interest
min_stepindex_ps = np.min(stepindex_ps[0:52000])
max_stepindex_ps = np.max( stepindex_ps[0:52000])
Ave_stepindex_ps = np.mean( stepindex_ps[0:52000])
dif_stepindex_ps = max_stepindex_ps - min_stepindex_ps
print("min_stepindex_ps =", min_stepindex_ps)
print("max_stepindex_ps =", max_stepindex_ps)
print("Ave_stepindex_ps =", Ave_stepindex_ps)
print("dif_stepindex_ps =", dif_stepindex_ps)
min_time_ps = np.min(time_ps[0:52000])
max_time_ps = np.max(time_ps[0:52000])
Ave_time_ps = np.mean(time_ps[0:52000])
dif_time_ps = max_time_ps - min_time_ps
print("min_time_ps =", min_time_ps)
print("max_time_ps =", max_time_ps)
print("Ave_time_ps =", Ave_time_ps)
print("dif_time_ps =", dif_time_ps)
min_Tion_K = np.min(Tion_K[0:52000])
max_Tion_K = np.max(Tion_K[0:52000])
Ave_Tion_K = np.mean(Tion_K[0:52000])
dif_Tion_K = max_Tion_K - min_Tion_K
print("min_Tion_K =", min_Tion_K)
print("max_Tion_K =", max_Tion_K)
print("Ave_Tion_K =", Ave_Tion_K)
print("dif_Tion_K =", dif_Tion_K)
min_ekinc_eV = np.min(ekinc_eV[0:52000])
max_ekinc_eV = np.max(ekinc_eV[0:52000])
Ave_ekinc_eV = np.mean(ekinc_eV[0:52000])
dif_ekinc_eV = max_ekinc_eV - min_ekinc_eV
print("min_ekinc_eV =", min_ekinc_eV)
print("max_ekinc_eV =", max_ekinc_eV)
print("Ave_ekinc_eV =", Ave_ekinc_eV)
print("dif_ekinc_eV =", dif_ekinc_eV)
min_econs_eV = np.min(econs_eV[0:52000])
max_econs_eV = np.max(econs_eV[0:52000])
Ave_econs_eV = np.mean(econs_eV[0:52000])
dif_econs_eV = max_econs_eV - min_econs_eV
print("min_econs_eV =", min_econs_eV)
print("max_econs_eV =", max_econs_eV)
print("Ave_econs_eV =", Ave_econs_eV)
print("dif_econs_eV =", dif_econs_eV)
min_econt_eV = np.min(econt_eV[0:52000])
max_econt_eV = np.max(econt_eV[0:52000])
Ave_econt_eV = np.mean(econt_eV[0:52000])
dif_econt_eV = max_econt_eV - min_econt_eV
print("min_econt_eV =", min_econt_eV)
print("max_econt_eV =", max_econt_eV)
print("Ave_econt_eV =", Ave_econt_eV)
print("dif_econt_eV =", dif_econt_eV)
#NVT
#Calculates the drift in temperature (K/ps)
#Calculates the drift in the computed energies (eV/ps)
#Drift is estimated here as (max - min) divided by the elapsed simulation time in ps
drift_Tion_K = dif_Tion_K/dif_time_ps
print(drift_Tion_K)
drift_ekinc_eV = dif_ekinc_eV/dif_time_ps
print(drift_ekinc_eV)
drift_econs_eV = dif_econs_eV/dif_time_ps
print(drift_econs_eV)
drift_econt_eV = dif_econt_eV/dif_time_ps
print(drift_econt_eV)
#NVE
#Calculates min, max and mean values of the parameters of interest
nve_min_stepindex_ps = np.min(stepindex_ps[52000:])
nve_max_stepindex_ps = np.max( stepindex_ps[52000:])
nve_Ave_stepindex_ps = np.mean( stepindex_ps[52000:])
nve_dif_stepindex_ps = nve_max_stepindex_ps - nve_min_stepindex_ps
print("nve_min_stepindex_ps =", nve_min_stepindex_ps)
print("nve_max_stepindex_ps =", nve_max_stepindex_ps)
print("nve_Ave_stepindex_ps =", nve_Ave_stepindex_ps)
print("nve_dif_stepindex_ps =", nve_dif_stepindex_ps)
nve_min_time_ps = np.min(time_ps[52000:])
nve_max_time_ps = np.max(time_ps[52000:])
nve_Ave_time_ps = np.mean(time_ps[52000:])
nve_dif_time_ps = nve_max_time_ps - nve_min_time_ps
print("nve_min_time_ps =", nve_min_time_ps)
print("nve_max_time_ps =", nve_max_time_ps)
print("nve_Ave_time_ps =", nve_Ave_time_ps)
print("nve_dif_time_ps =", nve_dif_time_ps)
nve_min_Tion_K = np.min(Tion_K[52000:])
nve_max_Tion_K = np.max(Tion_K[52000:])
nve_Ave_Tion_K = np.mean(Tion_K[52000:])
nve_dif_Tion_K = nve_max_Tion_K - nve_min_Tion_K
print("nve_min_Tion_K =", nve_min_Tion_K)
print("nve_max_Tion_K =", nve_max_Tion_K)
print("nve_Ave_Tion_K =", nve_Ave_Tion_K)
print("nve_dif_Tion_K =", nve_dif_Tion_K)
nve_min_ekinc_eV = np.min(ekinc_eV[52000:])
nve_max_ekinc_eV = np.max(ekinc_eV[52000:])
nve_Ave_ekinc_eV = np.mean(ekinc_eV[52000:])
nve_dif_ekinc_eV = nve_max_ekinc_eV - nve_min_ekinc_eV
print("nve_min_ekinc_eV =", nve_min_ekinc_eV)
print("nve_max_ekinc_eV =", nve_max_ekinc_eV)
print("nve_Ave_ekinc_eV =", nve_Ave_ekinc_eV)
print("nve_dif_ekinc_eV =", nve_dif_ekinc_eV)
nve_min_econs_eV = np.min(econs_eV[52000:])
nve_max_econs_eV = np.max(econs_eV[52000:])
nve_Ave_econs_eV = np.mean(econs_eV[52000:])
nve_dif_econs_eV = nve_max_econs_eV - nve_min_econs_eV
print("nve_min_econs_eV =", nve_min_econs_eV)
print("nve_max_econs_eV =", nve_max_econs_eV)
print("nve_Ave_econs_eV =", nve_Ave_econs_eV)
print("nve_dif_econs_eV =", nve_dif_econs_eV)
nve_min_econt_eV = np.min(econt_eV[52000:])
nve_max_econt_eV = np.max(econt_eV[52000:])
nve_Ave_econt_eV = np.mean(econt_eV[52000:])
nve_dif_econt_eV = nve_max_econt_eV - nve_min_econt_eV
print("nve_min_econt_eV =", nve_min_econt_eV)
print("nve_max_econt_eV =", nve_max_econt_eV)
print("nve_Ave_econt_eV =", nve_Ave_econt_eV)
print("nve_dif_econt_eV =", nve_dif_econt_eV)
#NVE
#Calculates the drift in temperature (K/ps)
#Calculates the drift in the computed energies (eV/ps)
#Drift is estimated here as (max - min) divided by the elapsed simulation time in ps
nve_drift_Tion_K = nve_dif_Tion_K/nve_dif_time_ps
print(nve_drift_Tion_K)
nve_drift_ekinc_eV = nve_dif_ekinc_eV/nve_dif_time_ps
print(nve_drift_ekinc_eV)
nve_drift_econs_eV = nve_dif_econs_eV/nve_dif_time_ps
print(nve_drift_econs_eV)
nve_drift_econt_eV = nve_dif_econt_eV/nve_dif_time_ps
print(nve_drift_econt_eV)
plt.figure(figsize=(12,8))
plt.ticklabel_format(useOffset=False)
plt.plot(np.array(time_ps)[0:52000], enthal_eV[0:52000], color="darkviolet", label='PBE: 300 K NVT', linewidth=1)
#plt.plot(np.array(time_ps)[52000:], enthal_eV[52000:], color="red", label='PBE: NVE', linewidth=1)
plt.minorticks_on()
plt.tick_params(axis='both', which='major', length=15, width=3)
plt.tick_params(axis='both', which='minor', length=5, width=3)
plt.xticks(np.arange(0, 5.4, 1))
plt.yticks(np.arange(-33160, -33152.2, 2))
plt.ylim([-33160, -33152.2])
plt.xticks(fontsize=25, fontweight='bold')
plt.yticks(fontsize=25, fontweight='bold')
plt.legend(loc=1, prop={'size': 20}, handlelength=3.5)
plt.xlabel('Time step [ps]', fontsize=25, fontweight='bold')
plt.ylabel('Enthalpy [eV]', fontsize=25, fontweight='bold')
plt.savefig('water_enthalpy_pbe_300k_nvt.png', bbox_inches='tight', dpi=400)
plt.show()
plt.close()
plt.figure(figsize=(12,8))
plt.ticklabel_format(useOffset=False)
#plt.plot(np.array(time_ps)[0:52000], enthal_eV[0:52000], color="darkviolet", label='PBE: 300 K NVT', linewidth=1)
plt.plot(np.array(time_ps)[52000:], enthal_eV[52000:], color="red", label='PBE: NVE', linewidth=1)
plt.minorticks_on()
plt.tick_params(axis='both', which='major', length=15, width=3)
plt.tick_params(axis='both', which='minor', length=5, width=3)
plt.xticks(np.arange(5, 16.4, 1))
plt.yticks(np.arange(-33157, -33152.2, 2))
plt.ylim([-33157, -33152.2])
plt.xticks(fontsize=25, fontweight='bold')
plt.yticks(fontsize=25, fontweight='bold')
plt.legend(loc=1, prop={'size': 20}, handlelength=3.5)
plt.xlabel('Time step [ps]', fontsize=25, fontweight='bold')
plt.ylabel('Enthalpy [eV]', fontsize=25, fontweight='bold')
plt.savefig('water_enthalpy_pbe_nve.png', bbox_inches='tight', dpi=400)
plt.show()
plt.close()
plt.figure(figsize=(12,8))
plt.ticklabel_format(useOffset=False)
plt.plot(time_ps[0:52000], Tion_K[0:52000], color="darkviolet", label='PBE: 300 K NVT', linewidth=1)
#plt.plot(time_ps[52000:], Tion_K[52000:], color="red", label='PBE: NVE', linewidth=1)
plt.minorticks_on()
plt.tick_params(axis='both', which='major', length=15, width=3)
plt.tick_params(axis='both', which='minor', length=5, width=3)
plt.xticks(np.arange(0, 5.4, 1))
plt.yticks(np.arange(200, 400.10, 50))
plt.ylim([200, 400])
plt.xticks(fontsize=25, fontweight='bold')
plt.yticks(fontsize=25, fontweight='bold')
plt.legend(loc=1, prop={'size': 20}, handlelength=3.5)
plt.xlabel('Time step [ps]', fontsize=25, fontweight='bold')
plt.ylabel('Temperature [K]', fontsize=25, fontweight='bold')
plt.savefig('water_temperature_pbe_300k_nvt.png', bbox_inches='tight', dpi=400)
plt.show()
plt.close()
plt.figure(figsize=(12,8))
plt.ticklabel_format(useOffset=False)
#plt.plot(time_ps[0:52000], Tion_K[0:52000], color="darkviolet", label='PBE: 300 K NVT', linewidth=1)
plt.plot(time_ps[52000:], Tion_K[52000:], color="red", label='PBE: NVE', linewidth=1)
plt.minorticks_on()
plt.tick_params(axis='both', which='major', length=15, width=3)
plt.tick_params(axis='both', which='minor', length=5, width=3)
plt.xticks(np.arange(5, 16.4, 1))
plt.yticks(np.arange(200, 400.10, 50))
plt.ylim([200, 400])
plt.xticks(fontsize=25, fontweight='bold')
plt.yticks(fontsize=25, fontweight='bold')
plt.legend(loc=1, prop={'size': 20}, handlelength=3.5)
plt.xlabel('Time step [ps]', fontsize=25, fontweight='bold')
plt.ylabel('Temperature [K]', fontsize=25, fontweight='bold')
plt.savefig('water_temperature_pbe_nve.png', bbox_inches='tight', dpi=400)
plt.show()
plt.close()
```
|
github_jupyter
|
| 0.188361 | 0.329554 |
## Preparing the data
```
from tensorflow.keras.datasets import cifar10
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Compute the mean and standard deviation per channel.
x_mean = np.mean(x_train, axis = (0, 1, 2))
x_std = np.std(x_train, axis = (0, 1, 2))
x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
test_size = 0.3, random_state = 777)
print('data ready~')
```
## Building and training the model
### Trying out regularization
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
model = Sequential()
# The input data has shape (32, 32, 3).
model.add(Conv2D(filters = 32, kernel_size = 3, padding = 'same', activation = 'relu', input_shape = (32, 32, 3)))
model.add(Conv2D(filters = 32, kernel_size = 3, padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001)))
model.add(MaxPool2D(pool_size = (2, 2), strides = 2, padding = 'same'))
model.add(Conv2D(filters = 64, kernel_size = 3, padding = 'same', activation = 'relu'))
model.add(Conv2D(filters = 64, kernel_size = 3, padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001)))
model.add(MaxPool2D(pool_size = (2, 2), strides = 2, padding = 'same'))
model.add(Conv2D(filters = 128, kernel_size = 3, padding = 'same', activation = 'relu'))
model.add(Conv2D(filters = 128, kernel_size = 3, padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001)))
model.add(MaxPool2D(pool_size = (2, 2), strides = 2, padding = 'same'))
model.add(Flatten())
model.add(Dense(256, activation = 'relu', kernel_regularizer = l2(0.001)))
model.add(Dense(10, activation = 'softmax'))
model.compile(optimizer = Adam(1e-4),
loss = 'sparse_categorical_crossentropy',
metrics = ['acc'])
history = model.fit(x_train, y_train,
epochs = 30,
batch_size = 32,
validation_data = (x_val, y_val))
```
## Plotting the training history
```
import matplotlib.pyplot as plt
his_dict = history.history
loss = his_dict['loss']
val_loss = his_dict['val_loss']
epochs = range(1, len(loss) + 1)
fig = plt.figure(figsize = (10, 5))
# Plot training and validation loss
ax1 = fig.add_subplot(1, 2, 1)
ax1.plot(epochs, loss, color = 'blue', label = 'train_loss')
ax1.plot(epochs, val_loss, color = 'orange', label = 'val_loss')
ax1.set_title('train and val loss')
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss')
ax1.legend()
acc = his_dict['acc']
val_acc = his_dict['val_acc']
# Plot the training and validation accuracy
ax2 = fig.add_subplot(1, 2, 2)
ax2.plot(epochs, acc, color = 'blue', label = 'train_acc')
ax2.plot(epochs, val_acc, color = 'orange', label = 'val_acc')
ax2.set_title('train and val acc')
ax2.set_xlabel('epochs')
ax2.set_ylabel('acc')
ax2.legend()
plt.show()
```
# Scientific computing and data science
*Science: Scientia: Knowledge*
## The paradigms of science:
<img src="https://www.researchgate.net/profile/Marcio_Costa13/publication/331216708/figure/fig4/AS:728393284870152@1550673900958/The-four-science-paradigms-empirical-theoretical-computational-and-data-driven-Each.png" width="600">
- The first paradigm is empirical: it is based on observation and experimentation
- The second paradigm is theoretical: it is based on testing models and refuting hypotheses
- The third paradigm is computational: we test models and analyze phenomena using computer simulations
- The [fourth paradigm](https://en.wikipedia.org/wiki/The_Fourth_Paradigm) is data-driven: we seek to understand phenomena from the massive data generated by sensors and simulations
## Third paradigm: Scientific Computing
Today, much scientific and engineering work involves at least one of the following aspects
- numerical calculations
- simulations
- computational modeling
- data analysis
In short
> The computer has become essential for doing science
and therefore
> Software has become essential for doing science
In this setting the following discipline emerges
> **Scientific computing** is the discipline concerned with developing the theory and techniques needed to solve complex mathematical problems from science and engineering efficiently
**Example:** Simulation of the merger of two black holes using data from LIGO
<img src="https://2.bp.blogspot.com/-h3SA26JUbB4/Vt86wugg1rI/AAAAAAAAzWE/DYuiVN4B8QA/s400/Two-Black-Holes-Merge-into-One.gif" width="500">
## Fourth paradigm: Data Science
Technological advances have made it possible to generate and capture an enormous amount of data
Consider, for example, social networks, the Internet of Things, or smart-city projects
The same is happening in the sciences
- Simulations with ever higher resolution
- Telescopes that cover larger areas of the sky at greater depth
- Digitization of medical exams
The term [*Big Data*](https://www.ibmbigdatahub.com/infographic/four-vs-big-data) is used to describe the current technological landscape in terms of the abundance, high generation rate, and variety of data
In this setting the following discipline emerges:
> **Data science** is the discipline that seeks to extract knowledge (information) from (massive) data
**Example:** [Super-resolution of astronomical images using a generative adversarial model trained on SDSS data](https://academic.oup.com/mnrasl/article/467/1/L110/2931732)
<img src="https://scx1.b-cdn.net/csz/news/800/2017/1-neuralnetwor.png" width="500">
**Skills**
Scientific computing and data science combine skills from
- Computer science: designing efficient and scalable algorithms to process large databases
- Applied mathematics and statistics: designing models that characterize and summarize the data
**Interdisciplinarity**
Scientific computing and data science are naturally **interdisciplinary** paradigms
> We must collaborate with domain experts to formulate scientific questions and to interpret the results of our computational routines
**Reproducibility**
> Good scientific software should make it possible to replicate and reproduce experimental results
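A minimal sketch of one ingredient of reproducibility (not part of the course material; the seed value is arbitrary) is fixing the random seeds before running an experiment, so that repeated runs produce the same numbers:
```
import random
import numpy as np

def fix_seeds(seed=0):
    """Fix the Python and NumPy random seeds so that runs are repeatable."""
    random.seed(seed)
    np.random.seed(seed)

fix_seeds()
print(np.random.rand(3))  # prints the same three numbers on every run
```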
## What can we expect from this course?
In this course we will learn to use scientific computing libraries built on the **Python** programming language
## What is Python?
Python is an interpreted, multi-paradigm programming language released in 1991 and widely used in data science
Why?
- A focus on simplicity, which results in code that is easier to maintain and in faster development
- A gentler learning curve
- A rich ecosystem of open libraries for scientific computing: [*Numpy*](http://www.numpy.org/), [*Scipy*](https://www.scipy.org/), [*Pandas*](https://pandas.pydata.org/), among others that will be studied in this course
And what about the performance of my algorithms compared to lower-level programming languages?
1. There is a trade-off between compute time and programmer hours
1. Python's scientific computing libraries are compiled against optimized low-level libraries (*e.g.* BLAS, LAPACK), as illustrated in the short sketch below
1. If the function I need is not in the libraries, Python can be integrated with lower-level languages (*e.g.* C++, Fortran)
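As a small illustration of the first two points (timings vary by machine; this is only a sketch, not a rigorous benchmark), the same reduction can be written as a plain Python loop or as a single vectorized NumPy call that runs in optimized compiled code:
```
import time
import numpy as np

x = np.random.rand(1_000_000)

# Pure Python loop
t0 = time.perf_counter()
total = 0.0
for value in x:
    total += value
t_loop = time.perf_counter() - t0

# Vectorized NumPy call, executed in compiled code
t0 = time.perf_counter()
total_np = x.sum()
t_numpy = time.perf_counter() - t0

print(f"loop: {t_loop:.4f} s, numpy: {t_numpy:.4f} s")
```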
# 1. Gender Classification using Random Forest
---
Given a name, we wish to determine the gender of a person. We first discuss data gathering and then jump into feature selection. Techniques to choose the right features are also discussed. With 800 features, a performance of 82% is obtained. However, choosing just 4 features yields an accuracy of up to 75%!
## 1.1 Modules and Data Gathering
```
import collections
from nltk.corpus import names #You won't need this, I'll provide the files
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from string import ascii_uppercase
```
Let us take a look at how these modules are used in the context of our gender classification problem.
- **collections** : Used to get the frequency distribution of letters in a name, and also the frequency of letter pairs and triplets.
- **names** : We use the names corpus of the Natural Language Toolkit (NLTK) to get our sample names. To save you the trouble of installing NLTK for a single dataset, I include the list of male names _male.txt_ and female names _female.txt_ in the repository. Here is a sample of 10 names in each file, accessed with or without NLTK. They produce the same output.
```
#With NLTK
male_names = names.words('male.txt')
female_names = names.words('female.txt')
#Print top 10 names in list
print("Male name list : ", [name for name in male_names[:10]])
print("Female name list : ", [name for name in female_names[:10]])
#Without NLTK
with open('male.txt','r') as min:
male_names = [name.strip('\r\n') for name in min.readlines()]
with open('female.txt','r') as fin:
female_names = [name.strip('\r\n') for name in fin.readlines()]
#Print top 10 names in list
print("Male name list : ", [name for name in male_names[:10]])
print("Female name list : ", [name for name in female_names[:10]])
```
The two lists together contain roughly 8,000 names, which form the dataset we will work with.
- **random** : Male and Female names should be shuffled for training.
- **numpy** : Used to reshape each test sample into a 2D row vector before prediction. This prevents a deprecation warning.
- **RandomForestClassifier** : As a part of python's _sklearn_, the Random Forest Classifier is the learning algorithm used for gender classification.
- **accuracy_score** : Determines the performance of our classifier.
- **ascii_uppercase** : is a string of all uppercase alphabets. The uppercase elements form the _keys_ of our _One-hot Encoding dictionaries_. One-hot Encoding will be explained when encountered in the code.
```
ascii_uppercase
```
## 1.2 Data Preparation & One-Hot Encoding
We begin by filtering out names that contain special characters.
```
#Get rid of names with non-alphabetic characters
male_names = filter(str.isalpha, [str(m) for m in male_names]) #Convert unicode array to string array
female_names = filter(str.isalpha, [str(f) for f in female_names])
```
Convert all names to upper case so that the character frequency distribution is not case sensitive. We label each name 'M' for _male_ or 'F' for _female_.
```
all_names = []
for name in male_names:
all_names.append( (name.upper(),'M') )
for name in female_names:
all_names.append( (name.upper(),'F') )
```
The term one-hot originally comes from electronics: _one-hot_ means that only one value in the list is _hot_ (or _on_) while the rest are _cold_. One-hot encoding transforms non-numeric variables into a numeric representation that a classifier can process more easily. Although decision-tree methods like the Random Forest used here can perform well without one-hot encoding, I demonstrate it so that you can try it with other classifiers if required.
```
#Create One-hot Encoding dictionary from element string
def create_one_hot(eles):
one_hot = {}
for i, l in enumerate(eles):
bits = [0]*len(eles); #Every element in the string/list is assigned 0
bits[i] = 1; #Only one bit is set to "ON"
one_hot[l] = bits #Actual assignment is made
return one_hot
```
Let us transform each character into its one-hot vector. Since there are 26 letters, each character is transformed into a 26-dimensional one-hot vector, as shown.
```
mono_alpha_hot = create_one_hot(ascii_uppercase)
for i, l in enumerate(ascii_uppercase):
print(l, " : ", mono_alpha_hot[l])
```
We can also create letter pairs and one-hot encode them. In this case, each letter pair is represented as a 676-dimensional vector.
```
bi_alphabets = [a+b for a in ascii_uppercase for b in ascii_uppercase]
bi_alpha_hot = create_one_hot(bi_alphabets)
```
You could also create letter triplets. However, one-hot encoding them produces far too many combinations (26 x 26 x 26 = 17,576 possibilities!), so we do not compute their vectors. If you are still curious to see how it runs, try executing the following snippet, which mirrors the pair creation above.
```
#Create Alphabet Triplets (Not Recommended)
tri_alphabets = [a+b+c for a in ascii_uppercase for b in ascii_uppercase for c in ascii_uppercase]
tri_alpha_hot = create_one_hot(tri_alphabets)
```
## 1.3 Choosing Features
The list of features initially observed are :
- First letter (26 features)
- Last Letter (26 features)
- Second Letter (26 features)
- Second from last letter (26 features)
- Frequency of each alphabet (26 features)
- Frequency of alphabet pairs (26 x 26 features)
Let us create the list of features for each name sample:
```
feat_names = []
feat_names.extend( ['Starts with '+ a for a in mono_alpha_hot.keys()] )
feat_names.extend( ['2nd Character '+ a for a in mono_alpha_hot.keys()] )
feat_names.extend( ['2nd Character from last '+ a for a in mono_alpha_hot.keys()] )
feat_names.extend( ['Ends with '+ a for a in mono_alpha_hot.keys()] )
feat_names.extend( ['Frequency of '+ a for a in list(ascii_uppercase)] )
feat_names.extend( ['Contains '+ a for a in list(bi_alphabets)] )
#Displaying the first 10 feature names
print(feat_names[:10])
```
We write a method `get_sample` that determines the features for a given sample `(name, gender)`. The method returns a tuple of:
- _features_ : Vector of numeric features
- _classification_ : The gender represented as '0' for 'Male' and '1' for 'Female'
```
def get_sample(name, gender):
features = []
name = name.strip()
##First Character
features.extend( mono_alpha_hot[name[0]] )
##Second Character
features.extend( mono_alpha_hot[name[1]] )
##Second Character from Last
features.extend( mono_alpha_hot[name[-2]] )
##Last Character
features.extend( mono_alpha_hot[name[-1]] )
##Frequency of Alphabets
freq = {key : 0 for key in list(ascii_uppercase)} #Initialize all keys to 0 for every Alphabet
updates = dict(collections.Counter(name)) #Get the frequency distribution of characters in 'name'
freq.update(updates) #update the original values of the dictionary
features.extend( freq.values() ) #Append the list of values
##Frequency of Alphabet pairs
freq = {key : 0 for key in list(bi_alphabets)} #Initialize all keys to 0 for every Alphabet Pair
updates = dict(collections.Counter( zip(name, name[1:]) )) #Freq. Distribution in the name in the form (A,B): n
updates = {(A+B):n for (A,B),n in zip(updates.keys(),updates.values())} #Convert (A,B):n to dictionary of "AB":n.
freq.update(updates)
features.extend( freq.values() ) #Append the list of values
##Gender Label
#classification = gender_hot[gender]
if gender == 'M':
classification = 0
else:
classification = 1
return (features, classification)
```
The method is invoked for every sample, and the results are stored in a list of tuples, `feature_list`.
```
feature_list = [get_sample(name, gender) for name, gender in all_names]
```
Let's shuffle the male and female names so that we get a well-distributed training and testing set. We split the data so that 7,000 samples are used for training and the rest for testing. To get an idea of the data we are dealing with, I will print the shapes of the resulting matrices and vectors.
```
#Shuffle list to make sure Male And Female are mixed well
random.shuffle(feature_list)
#Split test and train set
train_set = feature_list[:7000]
test_set = feature_list[7000:]
#Conversion to the correct format
X_train, y_train = zip(*train_set) #converts list of 2-field tuples to 2 separate lists
X_test, y_test = zip(*test_set)
print("Shape of Train Features :" , np.array(X_train).shape)
print("Shape of Train Labels :" , np.array(y_train).shape)
print("Shape of Test Features :" , np.array(X_test).shape)
print("Shape of Test Labels :" , np.array(y_test).shape)
```
Notice that there are 904 test samples: these are all the names left over after the 7,000 training samples (a few names with special characters were removed at the beginning). We now train a model using the Random Forest classifier.
```
classifier = RandomForestClassifier(n_estimators=150, min_samples_split=20)
classifier.fit(X_train, y_train) #Performs the actual "training" phase
```
We create a list of predictions `y_pred`.
```
y_pred = []
for i in range(0,len(X_test)):
y_pred.extend(classifier.predict(np.array(X_test[i]).reshape(1, -1)))
    #Calling classifier.predict(X_test[i]) on a bare 1-D sample gives a deprecation warning,
    #so we reshape each sample into a 2-D array of shape (1, n_features) first.
```
The list of actual gender classifications `y_test` is used to evaluate the predicted values `y_pred` and a simple accuracy is determined.
```
print(accuracy_score(y_test, y_pred))
```
We get up to 82% accuracy. Not bad! But this classifier uses a ton of features (806, to be precise). Are they all required? Let us determine the most important ones.
```
important_features = sorted(enumerate(classifier.feature_importances_), key=lambda x : x[1], reverse=True)
print ("Most Important Features : ")
[(feat_names[idx],prob) for idx, prob in important_features][:20]
```
## 1.4 Feature Reduction
### 1.4.1 One Feature
We will rewrite the program using the top features, including them one at a time and observing performance. This is done by redefining the method `get_sample` to include a single feature: its value is `1` if the name ends in 'A' and `0` otherwise.
```
def get_sample(name, gender):
features = []
name = name.strip()
##Ends with A
if name[-1] == 'A':
features.append(1)
else:
features.append(0)
##Gender Label
if gender == 'M':
classification = 0
else:
classification = 1
return (features, classification)
```
With this miniature definition, we perform the same steps: split the data into train and test sets, train a Random Forest, and determine the accuracy of the new model.
```
feature_list = [ get_sample(name, gender) for name, gender in all_names]
print("Accuracy with top feature")
for i in range(10):
#Shuffle list to make sure Male And Female are mixed well
random.shuffle(feature_list)
#Split test and train set
train_set = feature_list[:7000]
test_set = feature_list[7000:]
#Conversion to the correct format
X_train, y_train = zip(*train_set) #converts list of 2-field tuples to 2 separate lists
X_test, y_test = zip(*test_set)
# Random Forest Classifier
classifier = RandomForestClassifier(n_estimators=150, min_samples_split=20)
classifier.fit(X_train, y_train) #Performs the actual "training" phase
y_pred = []
for j in range(0,len(X_test)):
y_pred.extend(classifier.predict(np.array(X_test[j]).reshape(1, -1)))
print("Epoch ", i , " : ", accuracy_score(y_test, y_pred))
```
Wow! By just checking if the last character of the name is 'A', our classifier is able to determine the gender over 60% of the time.
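As a quick sanity check of why this single feature is so informative, we can count how often the names in each class end with 'A'. The snippet below is illustrative and assumes the `all_names` list built earlier in this notebook is still in memory:
```
#Fraction of names ending in 'A', per gender
ends_a = {'M': 0, 'F': 0}
totals = {'M': 0, 'F': 0}
for name, gender in all_names:
    totals[gender] += 1
    if name[-1] == 'A':
        ends_a[gender] += 1
for g in ('M', 'F'):
    print(g, ":", round(ends_a[g] / totals[g], 3))
```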
### 1.4.2 Two Features
Let us now include the second most important feature, i.e. the frequency of 'A'.
```
def get_sample(name, gender):
features = []
name = name.strip()
##Ends with A
if name[-1] == 'A':
features.append(1)
else:
features.append(0)
##Freq of A
features.append( name.count('A') )
##Gender Label
if gender == 'M':
classification = 0
else:
classification = 1
return (features, classification)
```
We execute it again.
```
feature_list = [ get_sample(name, gender) for name, gender in all_names]
print("Accuracy with top 2 features")
for i in range(10):
#Shuffle list to make sure Male And Female are mixed well
random.shuffle(feature_list)
#Split test and train set
train_set = feature_list[:7000]
test_set = feature_list[7000:]
#Conversion to the correct format
X_train, y_train = zip(*train_set) #converts list of 2-field tuples to 2 separate lists
X_test, y_test = zip(*test_set)
# Random Forest Classifier
classifier = RandomForestClassifier(n_estimators=150, min_samples_split=20)
classifier.fit(X_train, y_train) #Performs the actual "training" phase
y_pred = []
for j in range(0,len(X_test)):
y_pred.extend(classifier.predict(np.array(X_test[j]).reshape(1, -1)))
print("Epoch ", i , " : ", accuracy_score(y_test, y_pred))
```
### 1.4.3 Three Features
Let us include the feature that checks if a name ends with 'E'. The `get_sample` method is redefined as shown.
```
def get_sample(name, gender):
features = []
name = name.strip()
##Ends with A
if name[-1] == 'A':
features.append(1)
else:
features.append(0)
##Ends with 'E'
if name[-1] == 'E':
features.append(1)
else:
features.append(0)
#Freq of A
features.append( name.count('A') )
##Gender Label
if gender == 'M':
classification = 0
else:
classification = 1
return (features, classification)
```
Now let's see the results.
```
feature_list = [ get_sample(name, gender) for name, gender in all_names]
print("Accuracy with top 3 features")
for i in range(10):
#Shuffle list to make sure Male And Female are mixed well
random.shuffle(feature_list)
#Split test and train set
train_set = feature_list[:7000]
test_set = feature_list[7000:]
#Conversion to the correct format
X_train, y_train = zip(*train_set) #converts list of 2-field tuples to 2 separate lists
X_test, y_test = zip(*test_set)
# Random Forest Classifier
classifier = RandomForestClassifier(n_estimators=150, min_samples_split=20)
classifier.fit(X_train, y_train) #Performs the actual "training" phase
y_pred = []
for j in range(0,len(X_test)):
y_pred.extend(classifier.predict(np.array(X_test[j]).reshape(1, -1)))
print("Epoch ", i , " : ", accuracy_score(y_test, y_pred))
```
Now we are getting somewhere! We get a 72% accuracy with the top 3 features.
### 1.4.4 Four Features
For the last case, let's take the top 4 features.
```
def get_sample(name, gender):
features = []
name = name.strip()
##Ends with A
if name[-1] == 'A':
features.append(1)
else:
features.append(0)
##Ends with 'E'
if name[-1] == 'E':
features.append(1)
else:
features.append(0)
#Freq of A
features.append( name.count('A') )
##2nd character from end is N
if name[-2] == 'N':
features.append(1)
else:
features.append(0)
##Gender Label
if gender == 'M':
classification = 0
else:
classification = 1
return (features, classification)
```
Let's see the results!
```
feature_list = [ get_sample(name, gender) for name, gender in all_names]
print("Accuracy with top 4 features")
for i in range(10):
#Shuffle list to make sure Male And Female are mixed well
random.shuffle(feature_list)
#Split test and train set
train_set = feature_list[:7000]
test_set = feature_list[7000:]
#Conversion to the correct format
X_train, y_train = zip(*train_set) #converts list of 2-field tuples to 2 separate lists
X_test, y_test = zip(*test_set)
# Random Forest Classifier
classifier = RandomForestClassifier(n_estimators=150, min_samples_split=20)
classifier.fit(X_train, y_train) #Performs the actual "training" phase
y_pred = []
for j in range(0,len(X_test)):
y_pred.extend(classifier.predict(np.array(X_test[j]).reshape(1, -1)))
print("Epoch ", i , " : ", accuracy_score(y_test, y_pred))
```
## 1.5 Final Thoughts
Note that the performance of your gender classification system depends heavily on your dataset. If all your names come from a particular region, say a dataset of only American, Chinese, or Indian names, the classifier will perform best at classifying names of that kind.
# Supervised sentiment: hand-built feature functions
```
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2020"
```
## Contents
1. [Overview](#Overview)
1. [Set-up](#Set-up)
1. [Feature functions](#Feature-functions)
1. [Building datasets for experiments](#Building-datasets-for-experiments)
1. [Basic optimization](#Basic-optimization)
1. [Wrapper for SGDClassifier](#Wrapper-for-SGDClassifier)
1. [Wrapper for LogisticRegression](#Wrapper-for-LogisticRegression)
1. [Other scikit-learn models](#Other-scikit-learn-models)
1. [Experiments](#Experiments)
1. [Experiment with default values](#Experiment-with-default-values)
1. [A dev set run](#A-dev-set-run)
1. [Assessing BasicSGDClassifier](#Assessing-BasicSGDClassifier)
1. [Comparison with the baselines from Socher et al. 2013](#Comparison-with-the-baselines-from-Socher-et-al.-2013)
1. [A shallow neural network classifier](#A-shallow-neural-network-classifier)
1. [A softmax classifier in PyTorch](#A-softmax-classifier-in-PyTorch)
1. [Hyperparameter search](#Hyperparameter-search)
1. [utils.fit_classifier_with_crossvalidation](#utils.fit_classifier_with_crossvalidation)
1. [Example using LogisticRegression](#Example-using-LogisticRegression)
1. [Example using BasicSGDClassifier](#Example-using-BasicSGDClassifier)
1. [Statistical comparison of classifier models](#Statistical-comparison-of-classifier-models)
1. [Comparison with the Wilcoxon signed-rank test](#Comparison-with-the-Wilcoxon-signed-rank-test)
1. [Comparison with McNemar's test](#Comparison-with-McNemar's-test)
## Overview
* The focus of this notebook is __building feature representations__ for use with (mostly linear) classifiers (though you're encouraged to try out some non-linear ones as well!).
* The core characteristics of the feature functions we'll build here:
* They represent examples in __very large, very sparse feature spaces__.
* The individual feature functions can be __highly refined__, drawing on expert human knowledge of the domain.
* Taken together, these representations don't comprehensively represent the input examples. They just identify aspects of the inputs that the classifier model can make good use of (we hope).
* These classifiers tend to be __highly competitive__. We'll look at more powerful deep learning models in the next notebook, and it will immediately become apparent that it is very difficult to get them to measure up to well-built classifiers based in sparse feature representations.
## Set-up
See [the previous notebook](sst_01_overview.ipynb#Set-up) for set-up instructions.
```
from collections import Counter
import os
from sklearn.linear_model import LogisticRegression
import scipy.stats
from np_sgd_classifier import BasicSGDClassifier
import torch.nn as nn
from torch_shallow_neural_classifier import TorchShallowNeuralClassifier
import sst
import utils
# Set all the random seeds for reproducibility. Only the
# system and torch seeds are relevant for this notebook.
utils.fix_random_seeds()
SST_HOME = os.path.join('data', 'trees')
```
## Feature functions
* Feature representation is arguably __the most important step in any machine learning task__. As you experiment with the SST, you'll come to appreciate this fact, since your choice of feature function will have a far greater impact on the effectiveness of your models than any other choice you make.
* We will define our feature functions as `dict`s mapping feature names (which can be any object that can be a `dict` key) to their values (which must be `bool`, `int`, or `float`).
* To prepare for optimization, we will use `sklearn`'s [DictVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.DictVectorizer.html) class to turn these into matrices of features; a small sketch of this step appears just after this list.
* The `dict`-based approach gives us a lot of flexibility and frees us from having to worry about the underlying feature matrix.
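Here is a minimal sketch of that step (the toy feature dictionaries are invented purely for illustration and are not part of the course codebase):
```
from sklearn.feature_extraction import DictVectorizer

toy_feature_dicts = [
    {'good': 2, 'movie': 1},
    {'bad': 1, 'movie': 1, 'plot': 1}]

vec = DictVectorizer(sparse=True)
X_toy = vec.fit_transform(toy_feature_dicts)

print(vec.feature_names_)  # column order of the features
print(X_toy.toarray())     # dense view of the count matrix
```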
A typical baseline or default feature representation in NLP or NLU is built from unigrams. Here, those are the leaf nodes of the tree:
```
def unigrams_phi(tree):
"""The basis for a unigrams feature function.
Parameters
----------
tree : nltk.tree
The tree to represent.
Returns
-------
defaultdict
A map from strings to their counts in `tree`. (Counter maps a
list to a dict of counts of the elements in that list.)
"""
return Counter(tree.leaves())
```
In the docstring for `sst.sentiment_treebank_reader`, I pointed out that the labels on the subtrees can be used in a way that feels like cheating. Here's the most dramatic instance of this: `root_daughter_scores_phi` uses just the labels on the daughters of the root to predict the root (label). This will result in performance well north of 90% F1, but that's hardly worth reporting. (Interestingly, using the labels on the leaf nodes is much less powerful.) Anyway, don't use this function!
```
def root_daughter_scores_phi(tree):
"""The best way we've found to cheat without literally using the
labels as part of the feature representations.
Don't use this for any real experiments!
"""
return Counter([child.label() for child in tree])
```
It's generally good design to __write lots of atomic feature functions__ and then bring them together into a single function when running experiments. This will lead to reusable parts that you can assess independently and in sub-groups as part of development.
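For instance, one might keep unigram features and simple length features in separate functions and merge them in a single top-level feature function. The sketch below is illustrative only: `length_phi` and `combined_phi` are hypothetical names, and the input is assumed to be an `nltk.Tree` as in `unigrams_phi` above:
```
from collections import Counter

def length_phi(tree):
    """Toy atomic feature function: a single length-based feature."""
    return {'num_leaves': len(tree.leaves())}

def combined_phi(tree):
    """Combine atomic feature functions into one representation."""
    feats = Counter(tree.leaves())   # unigram counts, as in `unigrams_phi`
    feats.update(length_phi(tree))   # `Counter.update` adds these values in
    return feats
```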
## Building datasets for experiments
The second major phase for our analysis is a kind of set-up phase. Ingredients:
* A reader like `train_reader`
* A feature function like `unigrams_phi`
* A class function like `binary_class_func`
The convenience function `sst.build_dataset` uses these to build a dataset for training and assessing a model. See its documentation for details on how it works. Much of this is about taking advantage of `sklearn`'s many functions for model building.
```
train_dataset = sst.build_dataset(
SST_HOME,
reader=sst.train_reader,
phi=unigrams_phi,
class_func=sst.binary_class_func,
vectorizer=None)
print("Train dataset with unigram features has {:,} examples and {:,} features".format(
*train_dataset['X'].shape))
```
Notice that `sst.build_dataset` has an optional argument `vectorizer`:
* If it is `None`, then a new vectorizer is used and returned as `dataset['vectorizer']`. This is the usual scenario when training.
* For evaluation, one wants to represent examples exactly as they were represented during training. To ensure that this happens, pass the training `vectorizer` to this function:
```
dev_dataset = sst.build_dataset(
SST_HOME,
reader=sst.dev_reader,
phi=unigrams_phi,
class_func=sst.binary_class_func,
vectorizer=train_dataset['vectorizer'])
print("Dev dataset with unigram features has {:,} examples "
"and {:,} features".format(*dev_dataset['X'].shape))
```
## Basic optimization
We're now in a position to begin training supervised models!
For the most part, in this course, we will not study the theoretical aspects of machine learning optimization, concentrating instead on how to optimize systems effectively in practice. That is, this isn't a theory course, but rather an experimental, project-oriented one.
Nonetheless, we do want to avoid treating our optimizers as black boxes that work their magic and give us some assessment figures for whatever we feed into them. That seems irresponsible from a scientific and engineering perspective, and it also sends the false signal that the optimization process is inherently mysterious. So we do want to take a minute to demystify it with some simple code.
The module `sgd_classifier` contains a complete optimization framework, as `BasicSGDClassifier`. Well, it's complete in the sense that it achieves our full task of supervised learning. It's incomplete in the sense that it is very basic. You probably wouldn't want to use it in experiments. Rather, we're going to encourage you to rely on `sklearn` for your experiments (see below). Still, this is a good basic picture of what's happening under the hood.
So what is `BasicSGDClassifier` doing? The heart of it is the `fit` function (reflecting the usual `sklearn` naming system). This method implements a hinge-loss stochastic sub-gradient descent optimization. Intuitively, it works as follows:
1. Start by assuming that all the feature weights are `0`.
1. Move through the dataset instance-by-instance in random order.
1. For each instance, classify it using the current weights.
1. If the classification is incorrect, move the weights in the direction of the correct classification.
This process repeats for a user-specified number of iterations (default `10` below), and the weight movement is tempered by a learning-rate parameter `eta` (default `0.1`). The output is a set of weights that can be used to make predictions about new (properly featurized) examples.
In more technical terms, the objective function is
$$
\min_{\mathbf{w} \in \mathbb{R}^{d}}
\sum_{(x,y)\in\mathcal{D}}
\max_{y'\in\mathbf{Y}}
\left[\mathbf{Score}_{\textbf{w}, \phi}(x,y') + \mathbf{cost}(y,y')\right] - \mathbf{Score}_{\textbf{w}, \phi}(x,y)
$$
where $\mathbf{w}$ is the set of weights to be learned, $\mathcal{D}$ is the training set of example–label pairs, $\mathbf{Y}$ is the set of labels, $\mathbf{cost}(y,y') = 0$ if $y=y'$, else $1$, and $\mathbf{Score}_{\textbf{w}, \phi}(x,y')$ is the inner product of the weights
$\mathbf{w}$ and the example as featurized according to $\phi$.
The `fit` method is then calculating the sub-gradient of this objective. In succinct pseudo-code:
* Initialize $\mathbf{w} = \mathbf{0}$
* Repeat $T$ times:
* for each $(x,y) \in \mathcal{D}$ (in random order):
* $\tilde{y} = \text{argmax}_{y'\in \mathcal{Y}} \mathbf{Score}_{\textbf{w}, \phi}(x,y') + \mathbf{cost}(y,y')$
* $\mathbf{w} = \mathbf{w} + \eta(\phi(x,y) - \phi(x,\tilde{y}))$
This is very intuitive – push the weights in the direction of the positive cases. It doesn't require any probability theory. And such loss functions have proven highly effective in many settings. For a more powerful version of this classifier, see [sklearn.linear_model.SGDClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier). With `loss='hinge'`, it should behave much like `BasicSGDClassifier` (but faster!).
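To make the pseudo-code concrete, here is a compact toy re-implementation of that update rule for dense inputs. It is a sketch for illustration only, not the actual `BasicSGDClassifier` code:
```
import numpy as np

def fit_hinge_sgd(X, y, n_classes, eta=0.1, max_iter=10, seed=42):
    """Cost-augmented hinge-loss SGD, following the pseudo-code above.

    X : 2d np.array of featurized examples; y : 1d np.array of int labels.
    Returns a weight matrix W with one row of weights per class.
    """
    rng = np.random.RandomState(seed)
    n_examples, n_feats = X.shape
    W = np.zeros((n_classes, n_feats))
    for _ in range(max_iter):
        for i in rng.permutation(n_examples):
            # Score each class, adding cost 1 for every incorrect class:
            scores = W @ X[i] + (np.arange(n_classes) != y[i])
            y_hat = int(np.argmax(scores))
            if y_hat != y[i]:
                W[y[i]] += eta * X[i]   # move toward the correct class
                W[y_hat] -= eta * X[i]  # and away from the predicted one
    return W
```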
### Wrapper for SGDClassifier
For the sake of our experimental framework, a simple wrapper for `SGDClassifier`:
```
def fit_basic_sgd_classifier(X, y):
"""Wrapper for `BasicSGDClassifier`.
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
BasicSGDClassifier
A trained `BasicSGDClassifier` instance.
"""
mod = BasicSGDClassifier()
mod.fit(X, y)
return mod
```
### Wrapper for LogisticRegression
As I said above, we likely don't want to rely on `BasicSGDClassifier` (though it does a good job with SST!). Instead, we want to rely on `sklearn`. Here's a simple wrapper for [sklearn.linear.model.LogisticRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) using our
`build_dataset` paradigm.
```
def fit_softmax_classifier(X, y):
"""Wrapper for `sklearn.linear.model.LogisticRegression`. This is
also called a Maximum Entropy (MaxEnt) Classifier, which is more
fitting for the multiclass case.
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
sklearn.linear.model.LogisticRegression
A trained `LogisticRegression` instance.
"""
mod = LogisticRegression(
fit_intercept=True,
solver='liblinear',
multi_class='auto')
mod.fit(X, y)
return mod
```
### Other scikit-learn models
* The [sklearn.linear_model](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model) package has a number of other classifier models that could be effective for SST.
* The [sklearn.ensemble](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.ensemble) package contains powerful classifiers as well. The theme that runs through all of them is that one can get better results by averaging the predictions of a bunch of more basic classifiers. A [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier) will bring some of the power of deep learning models without the optimization challenges (though see [this blog post on some limitations of the current sklearn implementation](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)).
* The [sklearn.svm](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.svm) contains variations on Support Vector Machines (SVMs).
## Experiments
We now have all the pieces needed to run experiments. And __we're going to want to run a lot of experiments__, trying out different feature functions, taking different perspectives on the data and labels, and using different models.
To make that process efficient and regimented, `sst` contains a function `experiment`. All it does is pull together these pieces and use them for training and assessment. It's complicated, but the flexibility will turn out to be an asset.
### Experiment with default values
```
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
train_reader=sst.train_reader,
assess_reader=None,
train_size=0.7,
class_func=sst.ternary_class_func,
score_func=utils.safe_macro_f1,
verbose=True)
```
A few notes on this function call:
* Since `assess_reader=None`, the function reports performance on a random train–test split. Give `sst.dev_reader` as the argument to assess against the `dev` set.
* `unigrams_phi` is the function we defined above. By changing/expanding this function, you can start to improve on the above baseline, perhaps periodically seeing how you do on the dev set.
* `fit_softmax_classifier` is the wrapper we defined above. To assess new models, simply define more functions like this one. Such functions just need to consume an `(X, y)` constituting a dataset and return a model.
### A dev set run
```
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
class_func=sst.ternary_class_func,
assess_reader=sst.dev_reader)
```
### Assessing BasicSGDClassifier
```
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_basic_sgd_classifier,
class_func=sst.ternary_class_func,
assess_reader=sst.dev_reader)
```
### Comparison with the baselines from Socher et al. 2013
Where does our default set-up sit with regard to published baselines for the binary problem? (Compare [Socher et al., Table 1](http://www.aclweb.org/anthology/D/D13/D13-1170.pdf).)
```
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
class_func=sst.binary_class_func,
assess_reader=sst.dev_reader)
```
### A shallow neural network classifier
While we're at it, we might as well see whether adding a hidden layer to our softmax classifier yields any benefits. Whereas `LogisticRegression` is, at its core, computing
$$\begin{align*}
y &= \textbf{softmax}(xW_{xy} + b_{y})
\end{align*}$$
the shallow neural network inserts a hidden layer with a non-linear activation applied to it:
$$\begin{align*}
h &= \tanh(xW_{xh} + b_{h}) \\
y &= \textbf{softmax}(hW_{hy} + b_{y})
\end{align*}$$
```
def fit_nn_classifier(X, y):
mod = TorchShallowNeuralClassifier(
hidden_dim=50, max_iter=100)
mod.fit(X, y)
return mod
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_nn_classifier,
class_func=sst.binary_class_func)
```
It looks like, with enough iterations (and perhaps some fiddling with the activation function and hidden dimensionality), this classifier would meet or exceed the baseline set up by `LogisticRegression`.
### A softmax classifier in PyTorch
Our PyTorch modules should support easy modification. For example, to turn `TorchShallowNeuralClassifier` into a `TorchSoftmaxClassifier`, one need only write a new `define_graph` method:
```
class TorchSoftmaxClassifier(TorchShallowNeuralClassifier):
def define_graph(self):
return nn.Linear(self.input_dim, self.n_classes_)
def fit_torch_softmax(X, y):
mod = TorchSoftmaxClassifier(max_iter=100)
mod.fit(X, y)
return mod
_ = sst.experiment(
SST_HOME,
unigrams_phi,
fit_torch_softmax,
class_func=sst.binary_class_func)
```
## Hyperparameter search
The training process learns __parameters__ — the weights. There are typically lots of other parameters that need to be set. For instance, our `BasicSGDClassifier` has a learning rate parameter and a training iteration parameter. These are called __hyperparameters__. The more powerful `sklearn` classifiers often have many more such hyperparameters. These are outside of the explicitly stated objective, hence the "hyper" part.
So far, we have just set the hyperparameters by hand. However, their optimal values can vary widely between datasets, and choices here can dramatically impact performance, so we would like to set them as part of the overall experimental framework.
### utils.fit_classifier_with_crossvalidation
Luckily, `sklearn` provides a lot of functionality for setting hyperparameters via cross-validation. The function `utils.fit_classifier_with_crossvalidation` implements a basic framework for taking advantage of these options.
This method has the same basic shape as `fit_softmax_classifier` above: it takes a dataset as input and returns a trained model. However, to find its favored model, it explores a space of hyperparameters supplied by the user, seeking the optimal combination of settings.
__Note__: this kind of search seems not to have a large impact for SST as we're using it. However, it can matter a lot for other data sets, and it's also an important step to take when trying to publish, since __reviewers are likely to want to check that your comparisons aren't based in part on opportunistic or ill-considered choices for the hyperparameters__.
### Example using LogisticRegression
Here's a fairly full-featured use of the above for the `LogisticRegression` model family:
```
def fit_softmax_with_crossvalidation(X, y):
"""A MaxEnt model of dataset with hyperparameter
cross-validation. Some notes:
* 'fit_intercept': whether to include the class bias feature.
* 'C': weight for the regularization term (smaller is more regularized).
    * 'penalty': type of regularization -- roughly, 'l1' encourages small
sparse models, and 'l2' encourages the weights to conform to a
gaussian prior distribution.
Other arguments can be cross-validated; see
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
Parameters
----------
X : 2d np.array
The matrix of features, one example per row.
y : list
The list of labels for rows in `X`.
Returns
-------
sklearn.linear_model.LogisticRegression
A trained model instance, the best model found.
"""
basemod = LogisticRegression(
fit_intercept=True,
solver='liblinear',
multi_class='auto')
cv = 5
param_grid = {'fit_intercept': [True, False],
'C': [0.4, 0.6, 0.8, 1.0, 2.0, 3.0],
'penalty': ['l1','l2']}
best_mod = utils.fit_classifier_with_crossvalidation(
X, y, basemod, cv, param_grid)
return best_mod
softmax_experiment = sst.experiment(
SST_HOME,
unigrams_phi,
fit_softmax_with_crossvalidation,
class_func=sst.ternary_class_func)
```
### Example using BasicSGDClassifier
The models written for this course are also compatible with this framework. They ["duck type"](https://en.wikipedia.org/wiki/Duck_typing) the `sklearn` models by having methods `fit`, `predict`, `get_params`, and `set_params`, and an attribute `params`.
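To make that interface concrete, here is a minimal sketch of a classifier that would duck type its way into this framework (illustrative only; this is not how `BasicSGDClassifier` is implemented):
```
from collections import Counter

class MajorityClassClassifier:
    """Smallest possible sklearn-style classifier: it just predicts the most
    frequent training label. The hyperparameters exist only to show how
    `get_params`/`set_params`/`params` make cross-validation possible."""
    def __init__(self, eta=0.1, max_iter=10):
        self.eta = eta
        self.max_iter = max_iter
        self.params = ['eta', 'max_iter']

    def get_params(self, deep=True):
        return {p: getattr(self, p) for p in self.params}

    def set_params(self, **params):
        for key, val in params.items():
            setattr(self, key, val)
        return self

    def fit(self, X, y):
        # "Training" is just remembering the most frequent label.
        self.majority_ = Counter(y).most_common(1)[0][0]
        return self

    def predict(self, X):
        return [self.majority_ for _ in range(X.shape[0])]
```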
```
def fit_basic_sgd_classifier_with_crossvalidation(X, y):
basemod = BasicSGDClassifier()
cv = 5
param_grid = {'eta': [0.01, 0.1, 1.0], 'max_iter': [10]}
best_mod = utils.fit_classifier_with_crossvalidation(
X, y, basemod, cv, param_grid)
return best_mod
sgd_experiment = sst.experiment(
SST_HOME,
unigrams_phi,
fit_basic_sgd_classifier_with_crossvalidation,
class_func=sst.ternary_class_func)
```
## Statistical comparison of classifier models
Suppose two classifiers differ according to an effectiveness measure like F1 or accuracy. Are they meaningfully different?
* For very large datasets, the answer might be clear: if performance is very stable across different train/assess splits and the difference in terms of correct predictions has practical importance, then you can clearly say yes.
* With smaller datasets, or models whose performance is closer together, it can be harder to determine whether the two models are different. We can address this question in a basic way with repeated runs and basic null-hypothesis testing on the resulting score vectors.
In general, one wants to compare __two feature functions against the same model__, or one wants to compare __two models with the same feature function used for both__. If both are changed at the same time, then it will be hard to figure out what is causing any differences you see.
### Comparison with the Wilcoxon signed-rank test
The function `sst.compare_models` is designed for such testing. The default set-up uses the non-parametric [Wilcoxon signed-rank test](https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test) to make the comparisons, which is relatively conservative and recommended by [Demšar 2006](http://www.jmlr.org/papers/v7/demsar06a.html) for cases where one can afford to do multiple assessments. For discussion, see [the evaluation methods notebook](evaluation_methods.ipynb#Wilcoxon-signed-rank-test).
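As a standalone illustration of the test itself (the score vectors below are made up, not real experiment output), `scipy.stats.wilcoxon` consumes the two paired score vectors and returns a statistic and a p-value:
```
import scipy.stats

# Hypothetical macro-F1 scores from 10 paired runs of two models:
scores_a = [0.60, 0.61, 0.59, 0.62, 0.60, 0.63, 0.58, 0.61, 0.62, 0.64]
scores_b = [0.58, 0.60, 0.57, 0.60, 0.59, 0.61, 0.57, 0.59, 0.60, 0.61]

stat, pvalue = scipy.stats.wilcoxon(scores_a, scores_b)
print("Wilcoxon statistic: {0:0.2f}, p-value: {1:0.4f}".format(stat, pvalue))
```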
Here's an example showing the default parameter values and comparing `LogisticRegression` and `BasicSGDClassifier`:
```
_ = sst.compare_models(
SST_HOME,
unigrams_phi,
fit_softmax_classifier,
stats_test=scipy.stats.wilcoxon,
trials=10,
phi2=None, # Defaults to same as first required argument.
train_func2=fit_basic_sgd_classifier, # Defaults to same as second required argument.
reader=sst.train_reader,
train_size=0.7,
class_func=sst.ternary_class_func,
score_func=utils.safe_macro_f1)
```
### Comparison with McNemar's test
[McNemar's test](https://en.wikipedia.org/wiki/McNemar%27s_test) operates directly on the vectors of predictions for the two models being compared. As such, it doesn't require repeated runs, which is good where optimization is expensive. For discussion, see [the evaluation methods notebook](evaluation_methods.ipynb#McNemar's-test).
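Conceptually, the test looks only at the examples where the two models disagree about correctness. A rough sketch of the continuity-corrected statistic (this is not the actual `utils.mcnemar` implementation, which also returns a p-value):
```
def mcnemar_statistic_sketch(y, preds_a, preds_b):
    # b: model A right, model B wrong; c: model A wrong, model B right.
    b = sum(1 for gold, pa, pb in zip(y, preds_a, preds_b) if pa == gold and pb != gold)
    c = sum(1 for gold, pa, pb in zip(y, preds_a, preds_b) if pa != gold and pb == gold)
    # Continuity-corrected chi-squared statistic with 1 degree of freedom
    # (assumes the models disagree at least once, i.e., b + c > 0).
    return (abs(b - c) - 1) ** 2 / (b + c)
```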
```
m = utils.mcnemar(
softmax_experiment['assess_dataset']['y'],
sgd_experiment['predictions'],
softmax_experiment['predictions'])
p = "p < 0.0001" if m[1] < 0.0001 else m[1]
print("McNemar's test: {0:0.02f} ({1:})".format(m[0], p))
```
```
from PIL import Image, ImageOps
import pytesseract
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from os.path import abspath
%matplotlib inline
plt.figure(figsize=(3,3))
img = cv2.imread('../img/test2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.imshow(gray, interpolation='none', cmap='gray')
cv2.imwrite("imgs/base_img.png", gray)
text_area = gray[750:1000, 5:414]
plt.imshow(text_area, interpolation='none', cmap='gray')
```
## Image Conversion
```
kernel = np.ones((2,1),np.uint8)
kernel2 = np.ones((5,17),np.uint8)
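# The small 2x1 kernel erodes away speckle noise left by the adaptive threshold
# below, and the wide 5x17 kernel then dilates the surviving strokes so that
# characters on the same text line merge into a single blob. Those blobs are
# later used both as a mask and as the contours for finding text lines.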
thr3 = cv2.adaptiveThreshold(text_area,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,37,-70)
eroded = cv2.erode(thr3,kernel,iterations = 1)
dilated = cv2.dilate(eroded,kernel2,iterations = 1)
#Image.fromarray(dilated).show()
extracted = cv2.bitwise_and(text_area,text_area, mask = dilated)
#Image.fromarray(extracted).show()
cv2.imwrite("imgs/extracted.png", extracted)
inverted = cv2.bitwise_not(extracted)
#Image.fromarray(inverted).show()
cv2.imwrite("imgs/inverted.png", inverted)
def resize(img, scale_percent):
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
return cv2.resize(img, dim, interpolation = cv2.INTER_LINEAR)
def brightness_contrast_adjust(img, b, c):
return cv2.addWeighted(img, 1. + c/127., img, 0, b-c)
_, contours, hierarchy = cv2.findContours(dilated.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
t2 = text_area.copy()
lines = []
for c in contours:
(x, y, w, h) = cv2.boundingRect(c) #bounding box
ar = w / float(h) #Aspect ratio
    if ar > 4 and h > 10:
        print("height: {}".format(h))
        print("Aspect ratio: {}".format(ar))
lines.append((y, t2[y:y + h, x:x + w].copy())) #add y value so can sort
cv2.rectangle(t2, (x, y), (x + w, y + h), (0, 255, 0), 2)
lines = sorted(lines, key=lambda x: x[0])
lines = [line for _, line in lines]
cv2.imwrite("imgs/bounds.png", t2)
lines = [cv2.bitwise_not(line) for line in lines]
lines = [resize(line, 300) for line in lines]
for i, line in enumerate(lines):
cv2.imwrite("imgs/line" + str(i) + ".png", line)
for line in lines:
text = pytesseract.image_to_string(line, lang="eng", config='-psm 7')
print(text)
print "#################"
open_kernel = np.ones((2,2),np.uint8)
for i, line in enumerate(lines):
blur = cv2.GaussianBlur(line,(3,3),1)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_MEAN_C, \
cv2.THRESH_BINARY,13,26)
#processed_line = resize(thresh, 300)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, open_kernel)
cv2.imwrite("imgs/line_proccessed" + str(i) + ".png", thresh)
text = pytesseract.image_to_string(thresh, lang="eng", config='-psm 7')
print(text)
contrast = brightness_contrast_adjust(inverted, 0, 100)
#Image.fromarray(contrast).show()
resized = resize(contrast, 300)
cv2.imwrite("imgs/high_contrast_big.png", resized)
text = pytesseract.image_to_string(resized, lang="eng", config='--psm 4')
print(text)
second_thresh = cv2.adaptiveThreshold(contrast,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY,31,90)
cv2.imwrite("imgs/2nd.png", second_thresh)
text = pytesseract.image_to_string(resize(second_thresh,200), lang="eng", config='--psm=4')
print(text)
```
# Text Classification and Data Sets
:label:`chapter_sentiment`
Text classification is a common task in natural language processing: it transforms a sequence of text of indefinite length into a text category. It is similar to image classification, the most frequently used application in this book, e.g. :numref:`chapter_naive_bayes`. The only difference is that, rather than an image, each example is a text sentence.
This section will focus on loading data for one of the sub-questions in this field: using text sentiment classification to analyze the emotions of the text's author. This problem is also called sentiment analysis and has a wide range of applications. For example, we can analyze user reviews of products to obtain user satisfaction statistics, or analyze user sentiments about market conditions and use it to predict future trends.
```
import d2l
from mxnet import gluon, nd
import os
import tarfile
```
## Text Sentiment Classification Data
We use Stanford's Large Movie Review Dataset as the data set for text sentiment classification[1]. This data set is divided into two data sets for training and testing purposes, each containing 25,000 movie reviews downloaded from IMDb. In each data set, the number of comments labeled as "positive" and "negative" is equal.
### Reading Data
We first download this data set to the "../data" path and extract it to "../data/aclImdb".
```
# Save to the d2l package.
def download_imdb(data_dir='../data'):
url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
fname = gluon.utils.download(url, data_dir)
with tarfile.open(fname, 'r') as f:
f.extractall(data_dir)
download_imdb()
```
Next, read the training and test data sets. Each example is a review and its corresponding label: 1 indicates "positive" and 0 indicates "negative".
```
# Save to the d2l package.
def read_imdb(folder='train', data_dir='../data'):
data, labels = [], []
for label in ['pos', 'neg']:
folder_name = os.path.join(data_dir, 'aclImdb', folder, label)
for file in os.listdir(folder_name):
with open(os.path.join(folder_name, file), 'rb') as f:
review = f.read().decode('utf-8').replace('\n', '')
data.append(review)
labels.append(1 if label == 'pos' else 0)
return data, labels
train_data = read_imdb('train')
print('# trainings:', len(train_data[0]))
for x, y in zip(train_data[0][:3], train_data[1][:3]):
print('label:', y, 'review:', x[0:60])
```
### Tokenization and Vocabulary
We use a word as a token, and then create a dictionary based on the training data set.
```
train_tokens = d2l.tokenize(train_data[0], token='word')
vocab = d2l.Vocab(train_tokens, min_freq=5)
d2l.set_figsize((3.5, 2.5))
d2l.plt.hist([len(line) for line in train_tokens], bins=range(0,1000,50));
```
### Padding to the Same Length
Because the reviews have different lengths, they cannot be directly combined into mini-batches. Here we fix the length of each comment at 500 tokens by truncating it or padding it with "<unk>" indices.
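`d2l.trim_pad` itself is not shown in this notebook; a plausible sketch of such a helper (assuming it truncates long sequences and pads short ones with the given token index) is:
```
def trim_pad_sketch(line, num_steps, padding_token):
    """Truncate `line` to `num_steps` tokens or pad it to that length."""
    if len(line) > num_steps:
        return line[:num_steps]  # trim
    return line + [padding_token] * (num_steps - len(line))  # pad
```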
```
num_steps = 500 # sequence length
train_features = nd.array([d2l.trim_pad(vocab[line], num_steps, vocab.unk)
for line in train_tokens])
train_features.shape
```
### Create Data Iterator
Now, we will create a data iterator. Each iteration will return a mini-batch of data.
```
train_iter = d2l.load_array((train_features, train_data[1]), 64)
for X, y in train_iter:
print('X', X.shape, 'y', y.shape)
break
'# batches:', len(train_iter)
```
## Put All Things Together
Lastly, we will save a function `load_data_imdb` into `d2l`, which returns the vocabulary and data iterators.
```
# Save to the d2l package.
def load_data_imdb(batch_size, num_steps=500):
download_imdb()
train_data, test_data = read_imdb('train'), read_imdb('test')
train_tokens = d2l.tokenize(train_data[0], token='word')
test_tokens = d2l.tokenize(test_data[0], token='word')
vocab = d2l.Vocab(train_tokens, min_freq=5)
train_features = nd.array([d2l.trim_pad(vocab[line], num_steps, vocab.unk)
for line in train_tokens])
test_features = nd.array([d2l.trim_pad(vocab[line], num_steps, vocab.unk)
for line in test_tokens])
train_iter = d2l.load_array((train_features, train_data[1]), batch_size)
test_iter = d2l.load_array((test_features, test_data[1]), batch_size,
is_train=False)
return train_iter, test_iter, vocab
```
## Summary
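* Text sentiment classification turns a variable-length review into a label (positive or negative here).
* We downloaded the IMDb review dataset, tokenized the reviews, built a vocabulary, padded every review to the same length, and wrapped the result into mini-batch data iterators with `load_data_imdb`.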
```
import os
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import Image
import requests
from opera_tools import plot_dataframe, plot_dataframe_lines
%matplotlib inline
```
## Electronic detector data
```
event_url = "http://opendata.cern.ch/record/10101"
event_id = "9190097972"
def get_event_data(url, event_id, directory='data'):
file_names = [
f"{event_id}_EventInfo.csv",
f"{event_id}_FilteredDTHitsXZ.csv",
f"{event_id}_FilteredRPCHitsXZ.csv",
f"{event_id}_FilteredRPCHitsYZ.csv",
f"{event_id}_FilteredTTHitsYZ.csv",
f"{event_id}_Vertices.csv",
f"{event_id}_Tracks.csv",
f"{event_id}_Lines.csv",
]
if not os.path.exists(directory):
os.mkdir(directory)
for f in file_names:
csv_url = f"{url}/files/{f}"
req = requests.get(csv_url)
url_content = req.content
data_file = f'{directory}/{f}'
csv_file = open(data_file, 'wb')
csv_file.write(url_content)
csv_file.close()
print(data_file)
get_event_data(event_url, event_id)
EventInfo = pd.read_csv('data/9190097972_EventInfo.csv', index_col=None)
EventInfo
```
The detector consists of:
1. scintillator strips target tracker (TT),
2. drift tubes (DT),
3. resistive plate chambers (RPC).
*DT only has the XZ projection, while RPC and TT have both XZ and YZ projections.*
### Filtered data with removed isolated hits
```
FilteredDTHitsXZ = pd.read_csv('data/9190097972_FilteredDTHitsXZ.csv', index_col=None)
FilteredDTHitsXZ
FilteredRPCHitsXZ = pd.read_csv('data/9190097972_FilteredRPCHitsXZ.csv', index_col=None)
FilteredRPCHitsXZ
FilteredRPCHitsYZ = pd.read_csv('data/9190097972_FilteredRPCHitsYZ.csv', index_col=None)
FilteredRPCHitsYZ
FilteredTTHitsXZ = pd.read_csv('data/9190097972_FilteredTTHitsXZ.csv', index_col=None)
print(len(FilteredTTHitsXZ))
FilteredTTHitsXZ.head()
FilteredTTHitsYZ = pd.read_csv('data/9190097972_FilteredTTHitsYZ.csv', index_col=None)
print(len(FilteredTTHitsYZ))
FilteredTTHitsYZ.head()
```
## Emulsion data
```
Vertices = pd.read_csv('data/9190097972_Vertices.csv', index_col=None)
Vertices
Tracks = pd.read_csv('data/9190097972_Tracks.csv', index_col=None)
print(len(Tracks))
Tracks.head()
Tracks.rename(index=str, columns={"posX": "SX", "posY": "SY", "posZ": "SZ", "slopeXZ": "TX", "slopeYZ": "TY" },
inplace=True)
plot_dataframe(Tracks, Vertices)
```
*Red points on the graph correspond to the vertices*
----------------
```
Lines = pd.read_csv('data/9190097972_Lines.csv', index_col=None)
print(len(Lines))
Lines.head()
plot_dataframe_lines(Lines)
```
# Info
DQN implemented with the TensorFlow TF-Agents module.
Using the same environment as in the [tutorial](https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial#hyperparameters): `CartPole-v0`.
The purpose is to see how easy it is to use the TF-Agents library and how the results compare to other implementations.
# Imports
```
import numpy as np
import tensorflow as tf
from tf_agents.networks import q_network
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.utils import common
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
import matplotlib.pyplot as plt
from tf_agents.agents.dqn import dqn_agent
plt.style.use('ggplot')
tf.__version__
```
# Hyperparameters
```
num_iterations = 20000 # @param {type:"integer"}
initial_collect_steps = 1000 # @param {type:"integer"}
collect_steps_per_iteration = 1 # @param {type:"integer"}
replay_buffer_max_length = 100000 # @param {type:"integer"}
batch_size = 64 # @param {type:"integer"}
learning_rate = 1e-3 # @param {type:"number"}
log_interval = 200 # @param {type:"integer"}
num_eval_episodes = 10 # @param {type:"integer"}
eval_interval = 1000 # @param {type:"integer"}
```
# Environment
```
env_name = 'CartPole-v0'  # alternative: 'LunarLander-v2'
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
```
# Agent
```
fc_layer_params = (256,)
q_net = q_network.QNetwork(
train_env.observation_spec(),
train_env.action_spec(),
fc_layer_params=fc_layer_params)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter = tf.Variable(0)
agent = dqn_agent.DqnAgent(
train_env.time_step_spec(),
train_env.action_spec(),
q_network=q_net,
optimizer=optimizer,
td_errors_loss_fn=common.element_wise_squared_loss,
train_step_counter=train_step_counter)
agent.initialize()
```
# Policies
```
eval_policy = agent.policy
collect_policy = agent.collect_policy
random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(),
train_env.action_spec())
def compute_avg_return(environment, policy, num_episodes=10):
"""Metrics and Evaluation
"""
total_return = 0.0
for _ in range(num_episodes):
time_step = environment.reset()
episode_return = 0.0
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = environment.step(action_step.action)
episode_return += time_step.reward
total_return += episode_return
avg_return = total_return / num_episodes
return avg_return.numpy()[0]
def collect_step(environment, policy, buffer):
"""Data Collection
"""
time_step = environment.current_time_step()
action_step = policy.action(time_step)
next_time_step = environment.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
# Add trajectory to the replay buffer
buffer.add_batch(traj)
def collect_data(env, policy, buffer, steps):
"""Data Collection
"""
for _ in range(steps):
collect_step(env, policy, buffer)
# Replay Buffer
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_max_length)
collect_data(train_env, random_policy, replay_buffer, steps=100)
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
agent.train = common.function(agent.train)
# Reset the train step
agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes)
returns = [avg_return]
dataset = replay_buffer.as_dataset(
num_parallel_calls=3,
sample_batch_size=batch_size,
num_steps=2).prefetch(3)
iterator = iter(dataset)
print(iterator)
```
# Training the agent
```
for _ in range(num_iterations):
# Collect a few steps using collect_policy and save to the replay buffer.
for _ in range(collect_steps_per_iteration):
collect_step(train_env, agent.collect_policy, replay_buffer)
# Sample a batch of data from the buffer and update the agent's network.
experience, unused_info = next(iterator)
train_loss = agent.train(experience).loss
step = agent.train_step_counter.numpy()
# if step % log_interval == 0:
# print('step = {0}: loss = {1}'.format(step, train_loss))
if step % eval_interval == 0:
avg_return = compute_avg_return(train_env, agent.policy, num_eval_episodes)
print('step = {0}: Average Return = {1}'.format(step, avg_return))
returns.append(avg_return)
```
# Plots
```
iterations = range(0, num_iterations + 1, eval_interval)
plt.plot(iterations, returns)
plt.ylabel('Average Return')
plt.xlabel('Iterations')
plt.ylim(top=250)
```
```
import datetime as dt
import arviz as az
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import numpyro
import numpyro.distributions as dist
import pandas as pd
import seaborn as sns
from jax import random
from numpyro.infer import MCMC, NUTS
from scipy.special import expit
from sklearn.preprocessing import LabelEncoder
df = pd.read_csv(
'data/meta_lineages.csv',
index_col=0,
parse_dates=[1,3],
infer_datetime_format=True,
cache_dates=True,
dtype = {'SEQ_REASON': 'category',
'SENDING_LAB_PC': 'category',
'SEQUENCING_LAB_PC': 'category',
'lineage': 'category',
'scorpio_call': 'category'
}
)
df.rename(columns={
'DATE_DRAW': 'date',
'PROCESSING_DATE': 'processing_date',
'SEQ_REASON': 'reason',
'SENDING_LAB_PC': 'sending_pc',
'SEQUENCING_LAB_PC': 'sequencing_pc',
'lineage': 'lineage',
'scorpio_call': 'scorpio'
},
inplace=True
)
df = df[df.date > '2021-10-01']
df = df[df.reason.isin(['N'])]
df['omi'] = df.lineage.apply(lambda x: int((x == 'BA.1')))
all = df.groupby(['sending_pc','date'])['omi'].count()
omi = df.groupby(['sending_pc','date'])['omi'].apply(lambda x: (x == 1).sum()).fillna(0).astype(int)
df_reshaped = pd.concat({'all': all, 'omi': omi}, axis=1).reset_index()
today = pd.Timestamp(str(dt.date.today()))
df_reshaped['days'] = (df_reshaped.date-today).dt.days
df_reshaped['sending_area'] = df_reshaped.sending_pc.apply(lambda x: int(x[0]))
lab_counts = df_reshaped[df_reshaped.date > '2021-12-01'].groupby('sending_pc')['all'].sum()
# df_filtered = df_reshaped[df_reshaped.sending_pc.isin(lab_counts[lab_counts>10].index) & (~df_reshaped.sending_pc.isin(['']))].copy()
df_filtered = df_reshaped[df_reshaped.sending_pc.isin(lab_counts[lab_counts>10].index) & (~df_reshaped.sending_pc.isin(['22767','70193','24106']))].copy()
le = LabelEncoder()
df_filtered['sending_pc_ID'] = le.fit_transform(df_filtered['sending_pc'].values)
omi_counts = df_filtered["omi"].values
total_counts = df_filtered["all"].values
sample_day = df_filtered["days"].values
sending_pc_ID = df_filtered['sending_pc_ID'].values
sending_area = df_filtered['sending_area'].values
from numpyro.infer.reparam import LocScaleReparam
reparam_config = {k: LocScaleReparam(0) for k in ["mu_a", "a"]}
@numpyro.handlers.reparam(config=reparam_config)
def model3(sending_pc, sending_area, sample_day, total_counts,omi_counts=None):
mu_a_mu = numpyro.sample("mu_a_mu", dist.Normal(0, 3.0))
mu_a_sigma = numpyro.sample("mu_a_sigma", dist.HalfNormal(2.0))
sigma_a = numpyro.sample("sigma_a", dist.HalfNormal(2.0))
b = numpyro.sample("b", dist.Normal(0.2, 0.2))
n_labs = len(np.unique(sending_pc))
n_areas = len(np.unique(sending_area))
sending_pc_to_area = pd.DataFrame({"sending_pc": sending_pc, "sending_area": sending_area}).groupby(['sending_pc']).sending_area.first().values
with numpyro.plate("plate_i", n_areas):
mu_a = numpyro.sample("mu_a", dist.Normal(mu_a_mu, mu_a_sigma))
with numpyro.plate("plate_j", n_labs):
a = numpyro.sample("a", dist.Normal(mu_a[sending_pc_to_area], sigma_a))
logit_est = a[sending_pc] + b * sample_day
with numpyro.plate("data", len(sending_pc)):
numpyro.sample("obs", dist.BinomialLogits(logits=logit_est, total_count=total_counts), obs=omi_counts)
nuts_kernel = NUTS(model3)
mcmc = MCMC(nuts_kernel, num_samples=5000, num_warmup=2000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, sending_pc_ID, sending_area, sample_day, total_counts, omi_counts=omi_counts)
mcmc.print_summary()
data = az.from_numpyro(mcmc)
az.plot_trace(data,compact=True)
plt.tight_layout()
s = numpyro.diagnostics.summary(mcmc.get_samples(group_by_chain=True))
print(f"""
Relatives tägliches Wachstum von Omikron gegenüber Delta:
{np.exp(s['b']['mean'])-1:.1%} mit 90%-Konfidenzintervall [{np.exp(s['b']['5.0%'])-1:.1%}, {np.exp(s['b']['95.0%'])-1:.1%}]
Relative Verdopplungszeit von Omikron gegenüber Delta:
{np.log(2)/np.log(1+s['b']['mean']):.2} Tage mit 90%-Konfidenzintervall [{np.log(2)/np.log(1+s['b']['95.0%']):.2},{np.log(2)/np.log(1+s['b']['5.0%']):.2}]
""")
nice_formatter = ticker.FuncFormatter(
lambda y, _: f'{ np.format_float_positional(100*y, trim="-", precision=6).rstrip(".")}%'
)
pdf = pd.DataFrame(s['a'])
pdf['plz'] = le.inverse_transform(pdf.index.values)
pdf.set_index('plz',inplace=True)
pdf.sort_values(by='median',inplace=True)
errors = ([expit(pdf['median'])-expit(pdf['5.0%']),expit(pdf['95.0%'])-expit(pdf['median'])])
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(figsize=(5,30))
ax.errorbar(x=expit(pdf['median'].values),y=pdf.index.values,xerr=errors,linestyle='None',marker='o')
sns.despine(left=True, bottom=True)
ax.set_ylim(-0.5,len(pdf)-0.5)
ax.tick_params(axis='both', which='major', labelbottom = True, bottom=False, top = False, labeltop=True)
ax.tick_params(axis='both', which='minor', labelbottom = False, bottom=False, top = False, labeltop=False)
ax.set_xscale('logit')
ax.xaxis.set_major_formatter(nice_formatter)
ax.set(title=f'Modellierter Omikron-Anteil nach PCR-Labor-PLZ am {today.strftime("%d.%m.%Y")}')
ax.set_ylabel("Postleitzahl des PCR-Probe-versendenden Labors")
ax.set_xlabel("Heutiger Omikron-Anteil in Surveillance-Proben mit 90%-KI")
plt.subplots_adjust(left=0.05, right=0.95, top=0.9, bottom=0.02)
fig.text(0.49, -0.003, f"Datenstand: {str(dt.date.today())} | Datenquelle: RKI Sequenzdaten https://doi.org/10.5281/zenodo.5139363 | Analyse: @CorneliusRoemer", size=6, va="bottom", ha="center")
fig.savefig("plots/omi_share_by_pc.png",dpi=300,bbox_inches='tight',pad_inches=0.2)
```
There are three clear outliers that were removed. For more details on outliers, see the [Outlier Labs](outlier_labs.ipynb) notebook.
```
pdf = pd.DataFrame(s['mu_a'])
pdf.sort_values(by='median',inplace=True)
errors = ([expit(pdf['median'])-expit(pdf['5.0%']),expit(pdf['95.0%'])-expit(pdf['median'])])
fig, ax = plt.subplots(figsize=(5,4))
sns.set_theme(style="whitegrid")
ax.errorbar(x=expit(pdf['median'].values),y=pdf.index.values.astype(str),xerr=errors,linestyle='None',marker='o')
sns.despine(left=True, bottom=True)
ax.set(title='Title')
ax.set_ylim(-0.5,len(pdf)-0.5)
ax.set_xscale('logit')
ax.xaxis.set_major_formatter(nice_formatter)
ax.tick_params(axis='both', which='major', labelbottom = True, bottom=False, top = False, labeltop=True)
ax.tick_params(axis='both', which='minor', labelbottom = False, bottom=False, top = False, labeltop=False)
ax.set(title=f'Modellierter Omikron-Anteil nach Postleitzahlgebiet am {today.strftime("%d.%m.%Y")}')
ax.set_ylabel("Postleitzahlgebiet des PCR-Labors")
ax.set_xlabel("Heutiger Omikron-Anteil in Surveillance-Proben mit 90%-KI")
plt.subplots_adjust(left=0.05, right=0.95, top=0.9, bottom=0.15)
fig.text(0.5, -0.01, f"Datenstand: {str(dt.date.today())} | Datenquelle: RKI Sequenzdaten https://doi.org/10.5281/zenodo.5139363 | Analyse: @CorneliusRoemer", size=6, va="bottom", ha="center")
fig.savefig("plots/omi_share_by_area.png",dpi=200,bbox_inches='tight',pad_inches=0.2)
```
The `greedy` function is taking too much time calculating a solution. I'm aware it has several loops, but I'm curious whether something can be optimized, so in this notebook the functions of the greedy heuristic and the objective function evaluation will be interactively reviewed.
```
from models import Instance
```
Let's create a small size instance with $n = 20$, $p = 5$, and $\alpha = 2$
```
instance = Instance.random(20, 5, 2)
from heuristics.constructive import pdp_based
```
The greedy heuristic initializes the solution using a heuristic based on the PDP objective function with $p = \alpha$.
```
solution = pdp_based(instance, use_alpha_as_p=True)
solution
```
In this case the initial solution is just the 2 farthest vertexes of the instance.
```
import itertools
```
Now let's use list comprehensions to see the calculations as done by the `greedy` function and then by `eval_obj_func`. We're going to show only the first iteration so the output doesn't take too much space.
```
[(v, [
[
[(i, s, instance.get_dist(i, s)) for s in subset]
for subset in itertools.combinations(solution | {v}, instance.alpha)
]
for i in instance.indexes - (solution | {v})
])
for v in instance.indexes - solution][0]
```
We can understand the output as follows:
For the first vertex not in the solution, which is $1$, evaluate the objective function of the instance if that vertex were added to the solution. So for each vertex not in the new solution, $S \cup 1$, calculate its distance to each one in the solution.
The function was coded based on the mathematical notation of the objective function, which can be seen in the `obj_func.ipynb` notebook. That notation states that $\alpha$-size combinations of the solution are used to calculate or get the distance. But in the output we can see that the distances are duplicated, and this is a 3-size solution. Let's see what happens if we add another vertex, a random one for demonstration purposes:
```
solution |= {5}
solution
[(v, [
[
[(i, s, instance.get_dist(i, s)) for s in subset]
for subset in itertools.combinations(solution | {v}, instance.alpha)
]
for i in instance.indexes - (solution | {v})
])
for v in instance.indexes - solution][0]
```
Now the distances are in triplicate, so when the size gets to 5 vertexes things won't go well. Let's change the code to get each distance once and then take the $\alpha$-th closest, but first let's see how much time it's currently taking for all vertexes:
```
%%timeit
[(v, [
[
[(i, s, instance.get_dist(i, s)) for s in subset]
for subset in itertools.combinations(solution | {v}, instance.alpha)
]
for i in instance.indexes - (solution | {v})
])
for v in instance.indexes - solution]
```
## Refactorization
```
[(v, [
[
(i, s, instance.get_dist(i, s))
for s in solution | {v}
]
for i in instance.indexes - (solution | {v})
])
for v in instance.indexes - solution][:1]
%%timeit
[(v, [
[
(i, s, instance.get_dist(i, s))
for s in solution | {v}
]
for i in instance.indexes - (solution | {v})
])
for v in instance.indexes - solution]
```
Let's focus on the objective function code first:
```
[
[
(v, s, instance.get_dist(v, s))
for s in solution
]
for v in instance.indexes - solution
]
```
If we decide to continue, we won't be able to use the built-in `min` function. So let's compare getting the $\alpha$-th closest using `sorted` and `heapq.nsmallest`, with a list of random numbers as a sample:
```
import random
r = [random.random() for _ in range(10**6)]
sorted(r)[instance.alpha - 1]
%timeit sorted(r)[instance.alpha - 1]
import heapq
heapq.nsmallest(instance.alpha, r)[-1]
%timeit heapq.nsmallest(instance.alpha, r)[-1]
sr = [random.random() for _ in range(50)]
%timeit sorted(sr)[instance.alpha - 1]
%timeit heapq.nsmallest(instance.alpha, sr)[-1]
```
So `sorted` seems to be a little bit faster with small samples, but clearly `heapq.nsmallest` is way faster with bigger ones, so let's use it:
```
def new_of(instance, solution):
return max(
heapq.nsmallest(
instance.alpha,
(instance.get_dist(v, s) for s in solution)
)[-1]
for v in instance.indexes - solution
)
new_of(instance, solution)
%timeit new_of(instance, solution)
from utils import eval_obj_func
eval_obj_func(instance, solution)
%timeit eval_obj_func(instance, solution)
```
Not so much of a difference. Let's use a bigger instance and a random solution:
```
biginstance = Instance.random(1000, 50, 2)
bigsolution = set(random.sample(biginstance.indexes, 50))
new_of(biginstance, bigsolution)
%timeit new_of(biginstance, bigsolution)
eval_obj_func(biginstance, bigsolution)
%timeit eval_obj_func(biginstance, bigsolution)
```
Good! Let's compare them again but this time with $\alpha = 3$:
```
biginstance.alpha = 3
new_of(biginstance, bigsolution)
%timeit new_of(biginstance, bigsolution)
```
The time is constant for the same values of $n$ and $p$ because now $\alpha$ is just an argument of `heapq.nsmallest`. On the other hand, the old objective function code depends on the value of $\alpha$. I timed it and more than 5 minutes went by and it still didn't finish, so I had to stop it.
Now let's wrap it inside the greedy heuristic function:
```
def new_greedy(instance):
solution = pdp_based(instance, use_alpha_as_p=True)
while len(solution) < instance.p:
index, dist = min((
(v, new_of(instance, solution | {v}))
for v in instance.indexes - solution
),
key=lambda m: m[1]
)
solution |= {index}
return solution
new_greedy(instance)
%timeit new_greedy(instance)
```
# Training and Evaluating Machine Learning Models in cuML
This notebook explores several basic machine learning estimators in cuML, demonstrating how to train them and evaluate them with built-in metrics functions. All of the models are trained on synthetic data, generated by cuML's dataset utilities.
1. Random Forest Classifier
2. UMAP
3. DBSCAN
4. Linear Regression
[](https://colab.research.google.com/github/rapidsai/cuml/blob/branch-0.15/docs/source/estimator_intro.ipynb)
### Shared Library Imports
```
import cuml
from cupy import asnumpy
from joblib import dump, load
```
## 1. Classification
### Random Forest Classification and Accuracy metrics
The Random Forest classification algorithm builds several decision trees and aggregates their outputs to make a prediction. For more information on cuML's implementation of the Random Forest Classification model, please refer to:
https://docs.rapids.ai/api/cuml/stable/api.html#cuml.ensemble.RandomForestClassifier
Accuracy score is the ratio of correct predictions to the total number of predictions. It is used to measure the performance of classification models.
For more information on the accuracy score metric please refer to: https://en.wikipedia.org/wiki/Accuracy_and_precision
For more information on cuML's implementation of accuracy score metrics please refer to: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.accuracy.accuracy_score
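In symbols (a standard definition, not specific to cuML):
$$\text{accuracy} = \frac{\text{number of correct predictions}}{\text{total number of predictions}}$$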
The cell below shows an end-to-end pipeline for the Random Forest Classification model. Here the dataset is generated with cuML's `make_classification` function and is used to train the model and run predictions. The random forest's performance is then evaluated by comparing the values obtained from the cuML and sklearn accuracy metrics.
```
from cuml.datasets.classification import make_classification
from cuml.preprocessing.model_selection import train_test_split
from cuml.ensemble import RandomForestClassifier as cuRF
from sklearn.metrics import accuracy_score
# synthetic dataset dimensions
n_samples = 1000
n_features = 10
n_classes = 2
# random forest depth and size
n_estimators = 25
max_depth = 10
# generate synthetic data [ binary classification task ]
X, y = make_classification ( n_classes = n_classes,
n_features = n_features,
n_samples = n_samples,
random_state = 0 )
X_train, X_test, y_train, y_test = train_test_split( X, y, random_state = 0 )
model = cuRF( max_depth = max_depth,
n_estimators = n_estimators,
random_state = 0 )
trained_RF = model.fit ( X_train, y_train )
predictions = model.predict ( X_test )
cu_score = cuml.metrics.accuracy_score( y_test, predictions )
sk_score = accuracy_score( asnumpy( y_test ), asnumpy( predictions ) )
print( " cuml accuracy: ", cu_score )
print( " sklearn accuracy : ", sk_score )
# save
dump( trained_RF, 'RF.model')
# reload the model
loaded_model = load('RF.model')
```
## Clustering
### UMAP and Trustworthiness metrics
UMAP is a dimensionality reduction algorithm which performs non-linear dimension reduction. It can also be used for visualization.
For additional information on the UMAP model please refer to the documentation on https://docs.rapids.ai/api/cuml/stable/api.html#cuml.UMAP
Trustworthiness measures the extent to which the local structure of the data is retained in the embedding. Samples whose nearest neighbors in the embedding were not close to them in the original space are penalized. For more information on the trustworthiness metric please refer to: https://scikit-learn.org/dev/modules/generated/sklearn.manifold.t_sne.trustworthiness.html
The documentation for cuML's implementation of the trustworthiness metric is: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.trustworthiness.trustworthiness
The cell below shows an end-to-end pipeline for the UMAP model. Here, the blobs dataset is created with cuML's `make_blobs` function and used as the input. The resulting embedding is evaluated using the trustworthiness function, and the values obtained from sklearn's and cuML's trustworthiness metrics are compared below.
```
from cuml.datasets import make_blobs
from cuml.manifold.umap import UMAP as cuUMAP
from sklearn.manifold import trustworthiness
import numpy as np
n_samples = 1000
n_features = 100
cluster_std = 0.1
X_blobs, y_blobs = make_blobs( n_samples = n_samples,
cluster_std = cluster_std,
n_features = n_features,
random_state = 0,
dtype=np.float32 )
trained_UMAP = cuUMAP( n_neighbors = 10 ).fit( X_blobs )
X_embedded = trained_UMAP.transform( X_blobs )
cu_score = cuml.metrics.trustworthiness( X_blobs, X_embedded )
sk_score = trustworthiness( asnumpy( X_blobs ), asnumpy( X_embedded ) )
print(" cuml's trustworthiness score : ", cu_score )
print(" sklearn's trustworthiness score : ", sk_score )
# save
dump( trained_UMAP, 'UMAP.model')
# to reload the model uncomment the line below
# loaded_model = load('UMAP.model')
```
### DBSCAN and Adjusted Random Index
DBSCAN is a popular and powerful clustering algorithm. For additional information on the DBSCAN model please refer to the documentation on https://docs.rapids.ai/api/cuml/stable/api.html#cuml.DBSCAN
We create the blobs dataset using cuML's `make_blobs` function.
The adjusted Rand index is a metric used to measure the similarity between two clusterings, adjusted to take into account the chance grouping of elements.
For more information on the adjusted Rand index please refer to: https://en.wikipedia.org/wiki/Rand_index
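For reference, the adjusted Rand index has the standard form (where RI is the unadjusted Rand index and $E[\text{RI}]$ is its expected value under random labelling; a value of 1 means identical clusterings and values near 0 mean chance-level agreement):
$$\text{ARI} = \frac{\text{RI} - E[\text{RI}]}{\max(\text{RI}) - E[\text{RI}]}$$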
The cell below shows an end-to-end pipeline for DBSCAN. The output of DBSCAN's fit_predict is evaluated using the adjusted Rand index, and the values obtained from sklearn's and cuML's implementations are compared below.
```
from cuml.datasets import make_blobs
from cuml import DBSCAN as cumlDBSCAN
from sklearn.metrics import adjusted_rand_score
import numpy as np
n_samples = 1000
n_features = 100
cluster_std = 0.1
X_blobs, y_blobs = make_blobs( n_samples = n_samples,
n_features = n_features,
cluster_std = cluster_std,
random_state = 0,
dtype=np.float32 )
cuml_dbscan = cumlDBSCAN( eps = 3,
min_samples = 2)
trained_DBSCAN = cuml_dbscan.fit( X_blobs )
cu_y_pred = trained_DBSCAN.fit_predict ( X_blobs )
cu_adjusted_rand_index = cuml.metrics.cluster.adjusted_rand_score( y_blobs, cu_y_pred )
sk_adjusted_rand_index = adjusted_rand_score( asnumpy(y_blobs), asnumpy(cu_y_pred) )
print(" cuml's adjusted random index score : ", cu_adjusted_rand_index)
print(" sklearn's adjusted random index score : ", sk_adjusted_rand_index)
# save and optionally reload
dump( trained_DBSCAN, 'DBSCAN.model')
# to reload the model uncomment the line below
# loaded_model = load('DBSCAN.model')
```
## Regression
### Linear regression and R^2 score
Linear Regression is a simple machine learning model where the response y is modelled by a linear combination of the predictors in X.
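In matrix form (standard notation, not specific to cuML), the model is
$$y = X\beta + \epsilon,$$
where $\beta$ are the fitted coefficients and $\epsilon$ is a noise term.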
The R^2 score is also known as the coefficient of determination. It is used as a metric for scoring regression models, and it measures the proportion of the total variation in the response that is explained by the model.
For more information on the R^2 score metrics please refer to: https://en.wikipedia.org/wiki/Coefficient_of_determination
For more information on cuML's implementation of the r2 score metric please refer to: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.regression.r2_score
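For reference, the standard definition is
$$R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2},$$
where $\hat{y}_i$ are the model's predictions and $\bar{y}$ is the mean of the observed values.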
The cell below uses the Linear Regression model to compare the results between cuML's and sklearn's r2 score metrics. For more information on cuML's implementation of the Linear Regression model please refer to:
https://docs.rapids.ai/api/cuml/stable/api.html#linear-regression
```
from cuml.datasets import make_regression
from cuml.preprocessing.model_selection import train_test_split
from cuml.linear_model import LinearRegression as cuLR
from sklearn.metrics import r2_score
n_samples = 2**10
n_features = 100
n_info = 70
X_reg, y_reg = make_regression( n_samples = n_samples,
n_features = n_features,
n_informative = n_info,
random_state = 123 )
X_reg_train, X_reg_test, y_reg_train, y_reg_test = train_test_split( X_reg,
y_reg,
train_size = 0.8,
random_state = 10 )
cuml_reg_model = cuLR( fit_intercept = True,
normalize = True,
algorithm = 'eig' )
trained_LR = cuml_reg_model.fit( X_reg_train, y_reg_train )
cu_preds = trained_LR.predict( X_reg_test )
cu_r2 = cuml.metrics.r2_score( y_reg_test, cu_preds )
sk_r2 = r2_score( asnumpy( y_reg_test ), asnumpy( cu_preds ) )
print("cuml's r2 score : ", cu_r2)
print("sklearn's r2 score : ", sk_r2)
# save and reload
dump( trained_LR, 'LR.model')
# to reload the model uncomment the line below
# loaded_model = load('LR.model')
```
# Timeseries tutorial
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import os
timeseries_path = os.path.join('..', 'pvops', 'timeseries')
sys.path.append('..')
sys.path.append(timeseries_path)
from pvops.timeseries import preprocess
from pvops.timeseries.models import linear, iec
from pvops.text2time import utils as t2t_utils, preprocess as t2t_preprocess
example_OMpath = os.path.join('example_data', 'example_om_data2.csv')
example_prodpath = os.path.join('example_data', 'example_prod_with_covariates.csv')
example_metapath = os.path.join('example_data', 'example_metadata2.csv')
prod_data = pd.read_csv(example_prodpath, error_bad_lines=False, engine='python')
prod_data.head()
metadata = pd.DataFrame()
metadata['randid'] = ['R15', 'R10']
metadata['dcsize'] = [20000, 20000]
metadata.head()
#Format for dictionaries is {pvops variable: user-specific column names}
prod_col_dict = {'siteid': 'randid',
'timestamp': 'date',
'powerprod': 'generated_kW',
'irradiance':'irrad_poa_Wm2',
'temperature':'temp_amb_C', # Optional parameter, used by one of the modeling structures
'baseline': 'IEC_pstep', #user's name choice for new column (baseline expected energy defined by user or calculated based on IEC)
'dcsize': 'dcsize', #user's name choice for new column (System DC-size, extracted from meta-data)
'compared': 'Compared',#user's name choice for new column
'energy_pstep': 'Energy_pstep', #user's name choice for new column
'capacity_normalized_power': 'capacity_normalized_power', #user's name choice for new column
}
metad_col_dict = {'siteid': 'randid',
'dcsize': 'dcsize'}
```
### Data Formatting
```
prod_data_converted = t2t_preprocess.prod_date_convert(prod_data, prod_col_dict)
prod_data_datena_d, _ = t2t_preprocess.prod_nadate_process(prod_data_converted, prod_col_dict, pnadrop=True)
prod_data_datena_d.index = prod_data_datena_d[prod_col_dict['timestamp']]
min(prod_data_datena_d.index), max(prod_data_datena_d.index)
```
### Data Preprocessing
```
masked_prod_data = preprocess.prod_inverter_clipping_filter(prod_data_datena_d, prod_col_dict, metadata, metad_col_dict, 'threshold', freq=60)
filtered_prod_data = masked_prod_data[masked_prod_data['mask'] == False]
print(f"Detected and removed {sum(masked_prod_data['mask'])} rows with inverter clipping.")
# Visualize the power signal versus covariates for one site
temp = filtered_prod_data[filtered_prod_data['randid'] == 'R10']
for xcol in ['irrad_poa_Wm2', 'temp_amb_C', 'wind_speed_ms']:
plt.scatter(temp[xcol], temp[prod_col_dict['powerprod']])
plt.title(xcol)
plt.grid()
plt.show()
# Normalize the power by capacity
for site in metadata[metad_col_dict['siteid']].unique():
site_metad_mask = metadata[metad_col_dict['siteid']] == site
site_prod_mask = filtered_prod_data[prod_col_dict['siteid']] == site
dcsize = metadata.loc[site_metad_mask, metad_col_dict['dcsize']].values[0]
filtered_prod_data.loc[site_prod_mask, prod_col_dict['capacity_normalized_power']] = filtered_prod_data.loc[site_prod_mask, prod_col_dict['powerprod']] / dcsize
# Visualize the power signal versus covariates for one site
temp = filtered_prod_data[filtered_prod_data['randid'] == 'R10']
for xcol in ['irrad_poa_Wm2', 'temp_amb_C', 'wind_speed_ms']:
plt.scatter(temp[xcol], temp[prod_col_dict['capacity_normalized_power']])
plt.title(xcol)
plt.grid()
plt.show()
```
### Dynamic linear modeling
```
model_prod_data = filtered_prod_data.dropna(subset=['irrad_poa_Wm2', 'temp_amb_C', 'wind_speed_ms']+[prod_col_dict['powerprod']])
# Make sure to only pass data for one site! If sites are very similar, you can consider providing both sites.
model, train_df, test_df = linear.modeller(prod_col_dict,
kernel_type='polynomial',
#time_weighted='month',
X_parameters=['irrad_poa_Wm2', 'temp_amb_C'],#, 'wind_speed_ms'],
Y_parameter='generated_kW',
prod_df=model_prod_data,
test_split=0.05,
degree=3,
verbose=1)
from sklearn.metrics import mean_squared_error, r2_score
def plot(model, prod_col_dict, data_split='test', npts=50):
def print_info(real,pred,name):
mse = mean_squared_error(real, pred)
r2 = r2_score(real, pred)
print(f'[{name}] Mean squared error: %.2f'
% mse)
print(f'[{name}] Coefficient of determination: %.2f'
% r2)
fig,(ax) = plt.subplots(figsize=(14,8))
if data_split == 'test':
df = test_df
elif data_split == 'train':
df = train_df
measured = model.estimators['OLS'][f'{data_split}_y'][:npts]
ax2 = ax.twinx()
ax2.plot(model.estimators['OLS'][f'{data_split}_index'][:npts], df[prod_col_dict['irradiance']].values[:npts], 'k', label='irradiance')
ax.plot(model.estimators['OLS'][f'{data_split}_index'][:npts], df['expected_kW'].values[:npts], label='partner_expected')
print_info(measured, df['expected_kW'].values[:npts], 'partner_expected')
ax.plot(model.estimators['OLS'][f'{data_split}_index'][:npts], measured, label='measured')
for name, info in model.estimators.items():
predicted = model.estimators[name][f'{data_split}_prediction'][:npts]
ax.plot(model.estimators[name][f'{data_split}_index'][:npts], predicted, label=name)
print_info(measured, predicted, name)
ax2.set_ylabel("Irradiance (W/m2)")
ax.set_ylabel("Power (W)")
ax.set_xlabel('Time')
handles, labels = [(a+b) for a, b in zip(ax.get_legend_handles_labels(), ax2.get_legend_handles_labels())]
ax.legend(handles, labels, loc='best')
plt.show()
```
### Observe performance
```
plot(model, prod_col_dict, data_split='train', npts=40)
plot(model, prod_col_dict, data_split='test', npts=40)
```
<a href="https://colab.research.google.com/github/mrdbourke/pytorch-deep-learning/blob/main/06_pytorch_transfer_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 06. PyTorch Transfer Learning
> **Note:** There are a few updates coming in `torchvision` v0.13 that will mean some of the code in this notebook will be deprecated in the future, mainly the use of `torchvision.models.model_name(pretrained=True)`; the `pretrained=True` part is being changed.
>
> The concepts in this notebook are still valid for `torchvision` versions prior to v0.13, however for the latest and most up-to-date code refer to [`06_pytorch_transfer_learning_v2.ipynb`](https://www.learnpytorch.io/06_pytorch_transfer_learning_v2/).
>
> As of June 2022, `torchvision` v0.13 is still in beta but once it becomes the standard, I will replace this notebook with the updated version.
We've built a few models by hand so far.
But their performance has been poor.
You might be thinking, **is there a well-performing model that already exists for our problem?**
And in the world of deep learning, the answer is often *yes*.
We'll see how by using a powerful technique called [**transfer learning**](https://developers.google.com/machine-learning/glossary#transfer-learning).
## What is transfer learning?
**Transfer learning** allows us to take the patterns (also called weights) another model has learned from another problem and use them for our own problem.
For example, we can take the patterns a computer vision model has learned from datasets such as [ImageNet](https://www.image-net.org/) (millions of images of different objects) and use them to power our FoodVision Mini model.
Or we could take the patterns from a [language model](https://developers.google.com/machine-learning/glossary#masked-language-model) (a model that's been through large amounts of text to learn a representation of language) and use them as the basis of a model to classify different text samples.
The premise remains: find a well-performing existing model and apply it to your own problem.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-transfer-learning-example-overview.png" alt="transfer learning overview on different problems" width=900/>
*Example of transfer learning being applied to computer vision and natural language processing (NLP). In the case of computer vision, a computer vision model might learn patterns on millions of images in ImageNet and then use those patterns to infer on another problem. And for NLP, a language model may learn the structure of language by reading all of Wikipedia (and perhaps more) and then apply that knowledge to a different problem.*
## Why use transfer learning?
There are two main benefits to using transfer learning:
1. Can leverage an existing model (usually a neural network architecture) proven to work on problems similar to our own.
2. Can leverage a working model which has **already learned** patterns on similar data to our own. This often results in achieving **great results with less custom data**.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-transfer-learning-for-foodvision-mini%20.png" alt="transfer learning applied to FoodVision Mini" width=900/>
*We'll be putting these to the test for our FoodVision Mini problem, we'll take a computer vision model pretrained on ImageNet and try to leverage its underlying learned representations for classifying images of pizza, steak and sushi.*
## Where to find pretrained models
The world of deep learning is an amazing place.
So amazing that many people around the world share their work.
Often, code and pretrained models for the latest state-of-the-art research are released within a few days of publishing.
And there are several places you can find pretrained models to use for your own problems.
| **Location** | **What's there?** | **Link(s)** |
| ----- | ----- | ----- |
| **PyTorch domain libraries** | Each of the PyTorch domain libraries (`torchvision`, `torchtext`) come with pretrained models of some form. The models there work right within PyTorch. | [`torchvision.models`](https://pytorch.org/vision/stable/models.html), [`torchtext.models`](https://pytorch.org/text/main/models.html), [`torchaudio.models`](https://pytorch.org/audio/stable/models.html), [`torchrec.models`](https://pytorch.org/torchrec/torchrec.models.html) |
| **HuggingFace Hub** | A series of pretrained models on many different domains (vision, text, audio and more) from organizations around the world. There's plenty of different datasets too. | https://huggingface.co/models, https://huggingface.co/datasets |
| **`timm` (PyTorch Image Models) library** | Almost all of the latest and greatest computer vision models in PyTorch code as well as plenty of other helpful computer vision features. | https://github.com/rwightman/pytorch-image-models|
| **Paperswithcode** | A collection of the latest state-of-the-art machine learning papers with code implementations attached. You can also find benchmarks here of model performance on different tasks. | https://paperswithcode.com/ |
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-transfer-learning-where-to-find-pretrained-models.png" alt="different locations to find pretrained neural network models" width=900/>
*With access to such high-quality resources as above, it should be common practice at the start of every deep learning problem you take on to ask, "Does a pretrained model exist for my problem?"*
> **Exercise:** Spend 5-minutes going through [`torchvision.models`](https://pytorch.org/vision/stable/models.html) as well as the [HuggingFace Hub Models page](https://huggingface.co/models), what do you find? (there's no right answers here, it's just to practice exploring)
## What we're going to cover
We're going to take a pretrained model from `torchvision.models` and customise it to work on (and hopefully improve) our FoodVision Mini problem.
| **Topic** | **Contents** |
| ----- | ----- |
| **0. Getting setup** | We've written a fair bit of useful code over the past few sections, let's download it and make sure we can use it again. |
| **1. Get data** | Let's get the pizza, steak and sushi image classification dataset we've been using to try and improve our model's results. |
| **2. Create Datasets and DataLoaders** | We'll use the `data_setup.py` script we wrote in chapter 05. PyTorch Going Modular to setup our DataLoaders. |
| **3. Get and customise a pretrained model** | Here we'll download a pretrained model from `torchvision.models` and customise it to our own problem. |
| **4. Train model** | Let's see how the new pretrained model goes on our pizza, steak, sushi dataset. We'll use the training functions we created in the previous chapter. |
| **5. Evaluate the model by plotting loss curves** | How did our first transfer learning model go? Did it overfit or underfit? |
| **6. Make predictions on images from the test set** | It's one thing to check out a model's evaluation metrics but it's another thing to view its predictions on test samples, let's *visualize, visualize, visualize*! |
## Where can you get help?
All of the materials for this course [are available on GitHub](https://github.com/mrdbourke/pytorch-deep-learning).
If you run into trouble, you can ask a question on the course [GitHub Discussions page](https://github.com/mrdbourke/pytorch-deep-learning/discussions).
And of course, there's the [PyTorch documentation](https://pytorch.org/docs/stable/index.html) and [PyTorch developer forums](https://discuss.pytorch.org/), a very helpful place for all things PyTorch.
## 0. Getting setup
Let's get started by importing/downloading the required modules for this section.
To save us writing extra code, we're going to be leveraging some of the Python scripts (such as `data_setup.py` and `engine.py`) we created in the previous section, [05. PyTorch Going Modular](https://www.learnpytorch.io/05_pytorch_going_modular/).
Specifically, we're going to download the [`going_modular`](https://github.com/mrdbourke/pytorch-deep-learning/tree/main/going_modular) directory from the `pytorch-deep-learning` repository (if we don't already have it).
We'll also get the [`torchinfo`](https://github.com/TylerYep/torchinfo) package if it's not available.
`torchinfo` will help later on to give us a visual representation of our model.
```
import matplotlib.pyplot as plt
import torch
import torchvision
from torch import nn
from torchvision import transforms
# Try to get torchinfo, install it if it doesn't work
try:
from torchinfo import summary
except:
print("[INFO] Couldn't find torchinfo... installing it.")
!pip install -q torchinfo
from torchinfo import summary
# Try to import the going_modular directory, download it from GitHub if it doesn't work
try:
from going_modular.going_modular import data_setup, engine
except:
# Get the going_modular scripts
print("[INFO] Couldn't find going_modular scripts... downloading them from GitHub.")
!git clone https://github.com/mrdbourke/pytorch-deep-learning
!mv pytorch-deep-learning/going_modular .
!rm -rf pytorch-deep-learning
from going_modular.going_modular import data_setup, engine
```
Now let's setup device agnostic code.
> **Note:** If you're using Google Colab, and you don't have a GPU turned on yet, it's now time to turn one on via `Runtime -> Change runtime type -> Hardware accelerator -> GPU`.
```
# Setup device agnostic code
device = "cuda" if torch.cuda.is_available() else "cpu"
device
```
## 1. Get data
Before we can start to use **transfer learning**, we'll need a dataset.
To see how transfer learning compares to our previous attempts at model building, we'll download the same dataset we've been using for FoodVision Mini.
Let's write some code to download the [`pizza_steak_sushi.zip`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/data/pizza_steak_sushi.zip) dataset from the course GitHub and then unzip it.
We can also make sure if we've already got the data, it doesn't redownload.
```
import os
import zipfile
from pathlib import Path
import requests
# Setup path to data folder
data_path = Path("data/")
image_path = data_path / "pizza_steak_sushi"
# If the image folder doesn't exist, download it and prepare it...
if image_path.is_dir():
print(f"{image_path} directory exists.")
else:
print(f"Did not find {image_path} directory, creating one...")
image_path.mkdir(parents=True, exist_ok=True)
# Download pizza, steak, sushi data
with open(data_path / "pizza_steak_sushi.zip", "wb") as f:
request = requests.get("https://github.com/mrdbourke/pytorch-deep-learning/raw/main/data/pizza_steak_sushi.zip")
print("Downloading pizza, steak, sushi data...")
f.write(request.content)
# Unzip pizza, steak, sushi data
with zipfile.ZipFile(data_path / "pizza_steak_sushi.zip", "r") as zip_ref:
print("Unzipping pizza, steak, sushi data...")
zip_ref.extractall(image_path)
# Remove .zip file
os.remove(data_path / "pizza_steak_sushi.zip")
```
Excellent!
Now we've got the same dataset we've been using previously, a series of images of pizza, steak and sushi in standard image classification format.
Let's now create paths to our training and test directories.
```
# Setup Dirs
train_dir = image_path / "train"
test_dir = image_path / "test"
```
## 2. Create Datasets and DataLoaders
Since we've downloaded the `going_modular` directory, we can use the [`data_setup.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/going_modular/going_modular/data_setup.py) script we created in section [05. PyTorch Going Modular](https://www.learnpytorch.io/05_pytorch_going_modular/#2-create-datasets-and-dataloaders-data_setuppy) to prepare and setup our DataLoaders.
But since we'll be using a pretrained model from [`torchvision.models`](https://pytorch.org/vision/stable/models.html), there's a specific transform we need to prepare our images first.
### 2.1 Creating a transform for `torchvision.models`
When using a pretrained model, it's important that **your custom data going into the model is prepared in the same way as the original training data that went into the model**.
For the case of pretrained models in `torchvision.models`, the documentation states:
> All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224.
>
> The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]` and `std = [0.229, 0.224, 0.225]`.
>
> You can use the following transform to normalize:
>
> ```
> normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
> std=[0.229, 0.224, 0.225])
> ```
The good news is, we can achieve the above transformations with a combination of:
| **Transform number** | **Transform required** | **Code to perform transform** |
| ----- | ----- | ----- |
| 1 | Mini-batches of size `[batch_size, 3, height, width]` where height and width are at least 224x224^. | `torchvision.transforms.Resize()` to resize images into `[3, 224, 224]`^ and `torch.utils.data.DataLoader()` to create batches of images. |
| 2 | Values between 0 & 1. | `torchvision.transforms.ToTensor()` |
| 3 | A mean of `[0.485, 0.456, 0.406]` (values across each colour channel). | `torchvision.transforms.Normalize(mean=...)` to adjust the mean of our images. |
| 4 | A standard deviation of `[0.229, 0.224, 0.225]` (values across each colour channel). | `torchvision.transforms.Normalize(std=...)` to adjust the standard deviation of our images. |
> **Note:** ^some pretrained models from `torchvision.models` take images in sizes different from `[3, 224, 224]`, for example, some might take them in `[3, 240, 240]`. For specific input image sizes, see the documentation.
> **Question:** *Where did the mean and standard deviation values come from? Why do we need to do this?*
>
> These were calculated from the data. Specifically, the ImageNet dataset by taking the means and standard deviations across a subset of images.
>
> We also don't *need* to do this. Neural networks are usually quite capable of figuring out appropriate data distributions (they'll calculate where the mean and standard deviations need to be on their own) but setting them at the start can help our networks achieve better performance quicker.
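For intuition, here's a hypothetical sketch of how per-channel statistics are computed (using a random tensor in place of the real ImageNet images):
```
# Hypothetical sketch: per-channel mean/std over a batch of images.
# ImageNet's published values were computed the same way over real images.
images = torch.rand(8, 3, 224, 224)        # [batch_size, colour_channels, height, width]
channel_mean = images.mean(dim=[0, 2, 3])  # one value per colour channel
channel_std = images.std(dim=[0, 2, 3])
print(channel_mean, channel_std)
```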
Let's compose a series of `torchvision.transforms` to perform the above steps.
```
# Create a transforms pipeline
simple_transform = transforms.Compose([
transforms.Resize((224, 224)), # 1. Reshape all images to 224x224 (though some models may require different sizes)
transforms.ToTensor(), # 2. Turn image values to between 0 & 1
transforms.Normalize(mean=[0.485, 0.456, 0.406], # 3. A mean of [0.485, 0.456, 0.406] (across each colour channel)
std=[0.229, 0.224, 0.225]) # 4. A standard deviation of [0.229, 0.224, 0.225] (across each colour channel),
])
```
Wonderful!
Now we've got a series of transforms ready to prepare our images, let's create training and testing DataLoaders.
We can create these using the `create_dataloaders` function from the [`data_setup.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/going_modular/going_modular/data_setup.py) script we created in [05. PyTorch Going Modular Part 2](https://www.learnpytorch.io/05_pytorch_going_modular/#2-create-datasets-and-dataloaders-data_setuppy).
We'll set `batch_size=32` so our model sees mini-batches of 32 samples at a time.
And we can transform our images using the transform pipeline we created above by setting `transform=simple_transform`.
```
# Create training and testing DataLoader's as well as get a list of class names
train_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(train_dir=train_dir,
test_dir=test_dir,
transform=simple_transform, # resize, convert images to between 0 & 1 and normalize them
batch_size=32) # set mini-batch size to 32
train_dataloader, test_dataloader, class_names
```
## 3. Getting a pretrained model
Alright, here comes the fun part!
Over the past few notebooks we've been building PyTorch neural networks from scratch.
And while that's a good skill to have, our models haven't been performing as well as we'd like.
That's where **transfer learning** comes in.
The whole idea of transfer learning is to **take an already well-performing model on a problem-space similar to yours and then customising it to your use case**.
Since we're working on a computer vision problem (image classification with FoodVision Mini), we can find pretrained classification models in [`torchvision.models`](https://pytorch.org/vision/stable/models.html#classification).
Exploring the documentation, you'll find plenty of common computer vision architecture backbones such as:
| **Architecture backbone** | **Code** |
| ----- | ----- |
| [ResNet](https://arxiv.org/abs/1512.03385)'s | `torchvision.models.resnet18()`, `torchvision.models.resnet50()`... |
| [VGG](https://arxiv.org/abs/1409.1556) (similar to what we used for TinyVGG) | `torchvision.models.vgg16()` |
| [EfficientNet](https://arxiv.org/abs/1905.11946)'s | `torchvision.models.efficientnet_b0()`, `torchvision.models.efficientnet_b1()`... |
| [VisionTransformer](https://arxiv.org/abs/2010.11929) (ViT's)| `torchvision.models.vit_b_16()`, `torchvision.models.vit_b_32()`... |
| [ConvNeXt](https://arxiv.org/abs/2201.03545) | `torchvision.models.convnext_tiny()`, `torchvision.models.convnext_small()`... |
| More available in `torchvision.models` | `torchvision.models...` |
### 3.1 Which pretrained model should you use?
It depends on your problem/the device you're working with.
Generally, the higher number in the model name (e.g. `efficientnet_b0()` -> `efficientnet_b1()` -> `efficientnet_b7()`) means *better performance* but a *larger* model.
You might think better performance is *always better*, right?
That's true but **some better performing models are too big for some devices**.
For example, say you'd like to run your model on a mobile device: you'll have to take into account the limited compute resources on the device, so you'd be looking for a smaller model.
But if you've got unlimited compute power, as [*The Bitter Lesson*](http://www.incompleteideas.net/IncIdeas/BitterLesson.html) states, you'd likely take the biggest, most compute hungry model you can.
Understanding this **performance vs. speed vs. size tradeoff** will come with time and practice.
For me, I've found a nice balance in the `efficientnet_bX` models.
As of May 2022, [Nutrify](https://nutrify.app) (the machine learning powered app I'm working on) is powered by an `efficientnet_b0`.
[Comma.ai](https://comma.ai/) (a company that makes open source self-driving car software) [uses an `efficientnet_b2`](https://geohot.github.io/blog/jekyll/update/2021/10/29/an-architecture-for-life.html) to learn a representation of the road.
> **Note:** Even though we're using `efficientnet_bX`, it's important not to get too attached to any one architecture, as they are always changing as new research gets released. Best to experiment, experiment, experiment and see what works for your problem.
### 3.2 Setting up a pretrained model
The pretrained model we're going to be using is [`torchvision.models.efficientnet_b0()`](https://pytorch.org/vision/stable/generated/torchvision.models.efficientnet_b0.html#torchvision.models.efficientnet_b0).
The architecture is from the paper *[EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)*.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-effnet-b0-feature-extractor.png" alt="efficienet_b0 from PyTorch torchvision feature extraction model" width=900/>
*Example of what we're going to create, a pretrained [EfficientNetB0 model](https://ai.googleblog.com/2019/05/efficientnet-improving-accuracy-and.html) from `torchvision.models` with the output layer adjusted for our use case of classifying pizza, steak and sushi images.*
To setup the model and make sure it comes with pretrained weights, we can set `pretrained=True`, doing so will download a set of model weights learned from the ImageNet dataset.
This means the model has already been trained on millions of images and has a good base representation of image data.
The PyTorch version of this pretrained model is capable of achieving ~77.7% accuracy across ImageNet's 1000 classes.
> **Note:** As of December 2021, the PyTorch team announced a [new mutli-weight API for TorchVision](https://pytorch.org/blog/introducing-torchvision-new-multi-weight-support-api/), allowing different sets of model weights (e.g. weights learned from different datasets) to be used for pretrained models. Though as of May 2022, this new API is not released. But even with the new API, the idea still remains: take the weights/patterns from one model and apply it to your own.
We'll also send it to the target device.
```
# Setup the model with pretrained weights and send it to the target device
model = torchvision.models.efficientnet_b0(pretrained=True).to(device)
#model # uncomment to output (it's very long)
```
If we print the model, we get something similar to the following:
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-torchvision-effnet-print-out.png" alt="output of printing the efficientnet_b0 model from torchvision.models" width=900/>
Lots and lots and lots of layers.
This is one of the benefits of transfer learning: taking an existing model that's been crafted by some of the best engineers in the world and applying it to your own problem.
Our `efficientnet_b0` comes in three main parts (a short sketch of how to access them follows this list):
1. `features` - A collection of convolutional layers and other various activation layers to learn a base representation of vision data (this base representation/collection of layers is often referred to as **features** or **feature extractor**, "the base layers of the model learn the different **features** of images").
2. `avgpool` - Takes the average of the output of the `features` layer(s) and turns it into a **feature vector**.
3. `classifier` - Turns the **feature vector** into a vector with the same dimensionality as the number of required output classes (since `efficientnet_b0` is pretrained on ImageNet and because ImageNet has 1000 classes, `out_features=1000` is the default).
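Here's a minimal optional sketch (assuming `model` is the `efficientnet_b0` created above) showing that each of these three parts can be accessed as an attribute of the model:
```
# Optional sketch: the three main parts of efficientnet_b0 are attributes of the model
print(type(model.features))   # nn.Sequential of convolutional/activation blocks
print(model.avgpool)          # adaptive average pooling -> feature vector
print(model.classifier)       # Dropout + Linear(1280 -> 1000) classifier head
```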
### 3.3 Getting a summary of our model with `torchinfo.summary()`
To learn more about our model, let's use `torchinfo`'s [`summary()` method](https://github.com/TylerYep/torchinfo#documentation).
To do so, we'll pass in:
* `model` - the model we'd like to get a summary of.
* `input_size` - the shape of the data we'd like to pass to our model, for the case of `efficientnet_b0`, the input size is `(batch_size, 3, 224, 224)`, though [other variants of `efficientnet_bX` have different input sizes](https://github.com/pytorch/vision/blob/d2bfd639e46e1c5dc3c177f889dc7750c8d137c7/references/classification/train.py#L92-L93).
* **Note:** Many modern models can handle input images of varying sizes thanks to [`torch.nn.AdaptiveAvgPool2d()`](https://pytorch.org/docs/stable/generated/torch.nn.AdaptiveAvgPool2d.html), this layer adaptively adjusts the `output_size` of a given input as required. You can try this out by passing different size input images to `summary()` or your models.
* `col_names` - the various information columns we'd like to see about our model.
* `col_width` - how wide the columns should be for the summary.
* `row_settings` - what features to show in a row.
```
# Print a summary using torchinfo (uncomment for actual output)
# summary(model=model,
# input_size=(32, 3, 224, 224), # make sure this is "input_size", not "input_shape"
# # col_names=["input_size"], # uncomment for smaller output
# col_names=["input_size", "output_size", "num_params", "trainable"],
# col_width=20,
# row_settings=["var_names"]
# )
```
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-torchinfo-summary-unfrozen-layers.png" alt="output of torchinfo.summary() when passed our model with all layers as trainable" width=900/>
Woah!
Now that's a big model!
From the output of the summary, we can see all of the various input and output shape changes as our image data goes through the model.
And there are a whole bunch more total parameters (pretrained weights) to recognize different patterns in our data.
For reference, our model from previous sections, **TinyVGG had 8,083 parameters vs. 5,288,548 parameters for `efficientnet_b0`, an increase of ~654x**!
What do you think, will this mean better performance?
### 3.4 Freezing the base model and changing the output layer to suit our needs
The process of transfer learning usually goes: freeze some base layers of a pretrained model (typically the `features` section) and then adjust the output layers (also called head/classifier layers) to suit your needs.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-effnet-changing-the-classifier-head.png" alt="changing the efficientnet classifier head to a custom number of outputs" width=900/>
*You can customise the outputs of a pretrained model by changing the output layer(s) to suit your problem. The original `torchvision.models.efficientnet_b0()` comes with `out_features=1000` because there are 1000 classes in ImageNet, the dataset it was trained on. However, for our problem, classifying images of pizza, steak and sushi we only need `out_features=3`.*
Let's freeze all of the layers/parameters in the `features` section of our `efficientnet_b0` model.
> **Note:** To *freeze* layers means to keep them how they are during training. For example, if your model has pretrained layers, to *freeze* them would be to say, "don't change any of the patterns in these layers during training, keep them how they are." In essence, we'd like to keep the pretrained weights/patterns our model has learned from ImageNet as a backbone and then only change the output layers.
We can freeze all of the layers/parameters in the `features` section by setting the attribute `requires_grad=False`.
For parameters with `requires_grad=False`, PyTorch doesn't track gradient updates and in turn, these parameters won't be changed by our optimizer during training.
In essence, a parameter with `requires_grad=False` is "untrainable" or "frozen" in place.
```
# Freeze all base layers in the "features" section of the model (the feature extractor) by setting requires_grad=False
for param in model.features.parameters():
param.requires_grad = False
```
Feature extractor layers frozen!
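If you'd like to verify the effect numerically, here's a small optional check (not part of the original notebook) that counts trainable vs. total parameters; at this point only the (still 1000-class) `classifier` parameters remain trainable:
```
# Optional check: count trainable vs. total parameters after freezing the features
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable: {trainable_params} / Total: {total_params}")
```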
Let's now adjust the output layer or the `classifier` portion of our pretrained model to our needs.
Right now our pretrained model has `out_features=1000` because there are 1000 classes in ImageNet.
However, we don't have 1000 classes, we only have three, pizza, steak and sushi.
We can change the `classifier` portion of our model by creating a new series of layers.
The current `classifier` consists of:
```
(classifier): Sequential(
  (0): Dropout(p=0.2, inplace=True)
  (1): Linear(in_features=1280, out_features=1000, bias=True)
)
```
We'll keep the `Dropout` layer the same using [`torch.nn.Dropout(p=0.2, inplace=True)`](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html).
> **Note:** [Dropout layers](https://developers.google.com/machine-learning/glossary#dropout_regularization) randomly remove connections between two neural network layers with a probability of `p`. For example, if `p=0.2`, 20% of connections between neural network layers will be removed at random each pass. This practice is meant to help regularize (prevent overfitting) a model by making sure the connections that remain learn features to compensate for the removal of the other connections (hopefully these remaining features are *more general*).
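To see this concretely, here's a tiny optional sketch (not part of the original notebook) of what `nn.Dropout(p=0.2)` does to a tensor during training:
```
# Optional sketch: dropout zeroes ~20% of elements during training and scales
# the survivors by 1/(1-p) so the expected value stays the same.
dropout = torch.nn.Dropout(p=0.2)
x = torch.ones(10)
print(dropout(x))  # roughly 2 of the 10 values will be 0, the rest ~1.25
```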
And we'll keep `in_features=1280` for our `Linear` output layer but we'll change the `out_features` value to the length of our `class_names` (`len(['pizza', 'steak', 'sushi']) = 3`).
Our new `classifier` layer should be on the same device as our `model`.
```
# Set the manual seeds
torch.manual_seed(42)
torch.cuda.manual_seed(42)
# Get the length of class_names (one output unit for each class)
output_shape = len(class_names)
# Recreate the classifier layer and send it to the target device
model.classifier = torch.nn.Sequential(
torch.nn.Dropout(p=0.2, inplace=True),
torch.nn.Linear(in_features=1280,
out_features=output_shape, # same number of output units as our number of classes
bias=True)).to(device)
```
Nice!
Output layer updated, let's get another summary of our model and see what's changed.
```
# # Do a summary *after* freezing the features and changing the output classifier layer (uncomment for actual output)
# summary(model,
# input_size=(32, 3, 224, 224), # make sure this is "input_size", not "input_shape" (batch_size, color_channels, height, width)
# verbose=0,
# col_names=["input_size", "output_size", "num_params", "trainable"],
# col_width=20,
# row_settings=["var_names"]
# )
```
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-torchinfo-summary-frozen-layers.png" alt="output of torchinfo.summary() after freezing multiple layers in our model and changing the classifier head" width=900/>
Ho, ho! There's a fair few changes here!
Let's go through them:
* **Trainable column** - You'll see that many of the base layers (the ones in the `features` portion) have their Trainable value as `False`. This is because we set their attribute `requires_grad=False`. Unless we change this, these layers won't be updated during future training.
* **Output shape of `classifier`** - The `classifier` portion of the model now has an Output Shape value of `[32, 3]` instead of `[32, 1000]`. Its Trainable value is also `True`. This means its parameters will be updated during training. In essence, we're using the `features` portion to feed our `classifier` portion a base representation of an image, and then our `classifier` layer is going to learn how that base representation aligns with our problem.
* **Fewer trainable parameters** - Previously there were 5,288,548 trainable parameters. But since we froze many of the layers of the model and only left the `classifier` as trainable, there are now only 3,843 trainable parameters (even fewer than our TinyVGG model). Though there are also 4,007,548 non-trainable parameters, these will create a base representation of our input images to feed into our `classifier` layer.
> **Note:** The more trainable parameters a model has, the more compute power/longer it takes to train. Freezing the base layers of our model and leaving it with less trainable parameters means our model should train quite quickly. This is one huge benefit of transfer learning, taking the already learned parameters of a model trained on a problem similar to yours and only tweaking the outputs slightly to suit your problem.
## 4. Train model
Now we've got a pretrained model that's semi-frozen and has a customised `classifier`, how about we see transfer learning in action?
To begin training, let's create a loss function and an optimizer.
Because we're still working with multi-class classification, we'll use `nn.CrossEntropyLoss()` for the loss function.
And we'll stick with `torch.optim.Adam()` as our optimizer with `lr=0.001`.
```
# Define loss and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
```
Wonderful!
To train our model, we can use the `train()` function we defined in the [05. PyTorch Going Modular section 04](https://www.learnpytorch.io/05_pytorch_going_modular/#4-creating-train_step-and-test_step-functions-and-train-to-combine-them).
The `train()` function is in the [`engine.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/going_modular/going_modular/engine.py) script inside the [`going_modular` directory](https://github.com/mrdbourke/pytorch-deep-learning/tree/main/going_modular/going_modular).
Let's see how long it takes to train our model for 5 epochs.
> **Note:** We're only going to be training the `classifier` parameters here as all of the other parameters in our model have been frozen.
```
# Set the random seeds
torch.manual_seed(42)
torch.cuda.manual_seed(42)
# Start the timer
from timeit import default_timer as timer
start_time = timer()
# Setup training and save the results
results = engine.train(model=model,
train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
optimizer=optimizer,
loss_fn=loss_fn,
epochs=5,
device=device)
# End the timer and print out how long it took
end_time = timer()
print(f"[INFO] Total training time: {end_time-start_time:.3f} seconds")
```
Wow!
Our model trained quite fast (~5 seconds on my local machine with a [NVIDIA TITAN RTX GPU](https://www.nvidia.com/en-au/deep-learning-ai/products/titan-rtx/)/about 15 seconds on Google Colab with a [NVIDIA P100 GPU](https://www.nvidia.com/en-au/data-center/tesla-p100/)).
And it looks like it smashed our previous model results out of the park!
With an `efficientnet_b0` backbone, our model achieves almost 90% accuracy on the test dataset, almost *double* what we were able to achieve with TinyVGG.
Not bad for a model we downloaded with a few lines of code.
## 5. Evaluate model by plotting loss curves
Our model looks like it's performing pretty well.
Let's plot its loss curves to see what the training looks like over time.
We can plot the loss curves using the function `plot_loss_curves()` we created in [04. PyTorch Custom Datasets section 7.8](https://www.learnpytorch.io/04_pytorch_custom_datasets/#78-plot-the-loss-curves-of-model-0).
The function is stored in the [`helper_functions.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/helper_functions.py) script so we'll try to import it and download the script if we don't have it.
```
# Get the plot_loss_curves() function from helper_functions.py, download the file if we don't have it
try:
from helper_functions import plot_loss_curves
except:
print("[INFO] Couldn't find helper_functions.py, downloading...")
with open("helper_functions.py", "wb") as f:
import requests
request = requests.get("https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/helper_functions.py")
f.write(request.content)
from helper_functions import plot_loss_curves
# Plot the loss curves of our model
plot_loss_curves(results)
```
Those are some excellent looking loss curves!
It looks like the loss for both datasets (train and test) is heading in the right direction.
The same with the accuracy values, trending upwards.
That goes to show the power of **transfer learning**. Using a pretrained model often leads to pretty good results with a small amount of data in less time.
I wonder what would happen if you tried to train the model for longer? Or if we added more data?
> **Question:** Looking at the loss curves, does our model look like it's overfitting or underfitting? Or perhaps neither? Hint: Check out notebook [04. PyTorch Custom Datasets part 8. What should an ideal loss curve look like?](https://www.learnpytorch.io/04_pytorch_custom_datasets/#8-what-should-an-ideal-loss-curve-look-like) for ideas.
## 6. Make predictions on images from the test set
It looks like our model performs well quantitatively but how about qualitatively?
Let's find out by making some predictions with our model on images from the test set (these aren't seen during training) and plotting them.
*Visualize, visualize, visualize!*
One thing we'll have to remember is that for our model to make predictions on an image, the image has to be in the *same* format as the images our model was trained on.
This means we'll need to make sure our images have:
* **Same shape** - If our images are different shapes to what our model was trained on, we'll get shape errors.
* **Same datatype** - If our images are a different datatype (e.g. `torch.int8` vs. `torch.float32`) we'll get datatype errors.
* **Same device** - If our images are on a different device to our model, we'll get device errors.
* **Same transformations** - If our model was trained on images that have been transformed in a certain way (e.g. normalized with a specific mean and standard deviation) and we try to make predictions on images transformed in a different way, these predictions may be off.
> **Note:** These requirements go for all kinds of data if you're trying to make predictions with a trained model. Data you'd like to predict on should be in the same format as your model was trained on.
To do all of this, we'll create a function `pred_and_plot_image()` to:
1. Take in a trained model, a list of class names, a filepath to a target image, an image size, a transform and a target device.
2. Open an image with [`PIL.Image.open()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open).
3. Create a transform for the image (this will default to the same transform we used on our images to train our model).
4. Make sure the model is on the target device.
5. Turn on model eval mode with `model.eval()` (this turns off layers like `nn.Dropout()`, so they aren't used for inference) and the inference mode context manager.
6. Transform the target image with the transform made in step 3 and add an extra batch dimension with `torch.unsqueeze(dim=0)` so our input image has shape `[batch_size, color_channels, height, width]`.
7. Make a prediction on the image by passing it to the model ensuring it's on the target device.
8. Convert the model's output logits to prediction probabilities with `torch.softmax()`.
9. Convert model's prediction probabilities to prediction labels with `torch.argmax()`.
10. Plot the image with `matplotlib` and set the title to the prediction label from step 9 and prediction probability from step 8.
> **Note:** This is a similar function to [04. PyTorch Custom Datasets section 11.3's](https://www.learnpytorch.io/04_pytorch_custom_datasets/#113-putting-custom-image-prediction-together-building-a-function) `pred_and_plot_image()` with a few tweaked steps.
```
from typing import List, Tuple
from PIL import Image
# 1. Take in a trained model, class names, image path, image size, a transform and target device
def pred_and_plot_image(model: torch.nn.Module,
class_names: List[str],
image_path: str,
image_size: Tuple[int, int] = (224, 224),
transform: torchvision.transforms = None,
device: torch.device=device):
# 2. Open image
img = Image.open(image_path)
# 3. Create transformation for image (if one doesn't exist)
if transform is not None:
image_transform = transform
else:
image_transform = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
### Predict on image ###
# 4. Make sure the model is on the target device
model.to(device)
# 5. Turn on model evaluation mode and inference mode
model.eval()
with torch.inference_mode():
# 6. Transform and add an extra dimension to image (model requires samples in [batch_size, color_channels, height, width])
transformed_image = image_transform(img).unsqueeze(dim=0)
# 7. Make a prediction on image with an extra dimension and send it to the target device
target_image_pred = model(transformed_image.to(device))
# 8. Convert logits -> prediction probabilities (using torch.softmax() for multi-class classification)
target_image_pred_probs = torch.softmax(target_image_pred, dim=1)
# 9. Convert prediction probabilities -> prediction labels
target_image_pred_label = torch.argmax(target_image_pred_probs, dim=1)
# 10. Plot image with predicted label and probability
plt.figure()
plt.imshow(img)
plt.title(f"Pred: {class_names[target_image_pred_label]} | Prob: {target_image_pred_probs.max():.3f}")
plt.axis(False);
```
What a good looking function!
Let's test it out by making predictions on a few random images from the test set.
We can get a list of all the test image paths using `list(Path(test_dir).glob("*/*.jpg"))`, the stars in the `glob()` method say "any file matching this pattern", in other words, any file ending in `.jpg` (all of our images).
And then we can randomly sample a number of these using Python's [`random.sample(population, k)`](https://docs.python.org/3/library/random.html#random.sample) where `population` is the sequence to sample and `k` is the number of samples to retrieve.
```
# Get a random list of image paths from test set
import random
num_images_to_plot = 3
test_image_path_list = list(Path(test_dir).glob("*/*.jpg")) # get a list of all image paths from test data
test_image_path_sample = random.sample(population=test_image_path_list, # go through all of the test image paths
k=num_images_to_plot) # randomly select 'k' image paths to pred and plot
# Make predictions on and plot the images
for image_path in test_image_path_sample:
pred_and_plot_image(model=model,
image_path=image_path,
class_names=class_names,
image_size=(224, 224))
```
Woohoo!
Those predictions look far better than the ones our TinyVGG model was previously making.
### 6.1 Making predictions on a custom image
It looks like our model does well qualitatively on data from the test set.
But how about on our own custom image?
That's where the real fun of machine learning is!
Predicting on your own custom data, outside of any training or test set.
To test our model on a custom image, let's import the old faithful `pizza-dad.jpeg` image (an image of my dad eating pizza).
We'll then pass it to the `pred_and_plot_image()` function we created above and see what happens.
```
# Download custom image
import requests
# Setup custom image path
custom_image_path = data_path / "04-pizza-dad.jpeg"
# Download the image if it doesn't already exist
if not custom_image_path.is_file():
with open(custom_image_path, "wb") as f:
# When downloading from GitHub, need to use the "raw" file link
request = requests.get("https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/04-pizza-dad.jpeg")
print(f"Downloading {custom_image_path}...")
f.write(request.content)
else:
print(f"{custom_image_path} already exists, skipping download.")
# Predict on custom image
pred_and_plot_image(model=model,
image_path=custom_image_path,
class_names=class_names)
```
Two thumbs up!
Looks like our model got it right again!
But this time the prediction probability is higher than the one from TinyVGG (`0.559` here vs. `0.373` for TinyVGG) in [04. PyTorch Custom Datasets section 11.3](https://www.learnpytorch.io/04_pytorch_custom_datasets/#113-putting-custom-image-prediction-together-building-a-function).
This indicates our `efficientnet_b0` model is *more* confident in its prediction, whereas our TinyVGG model was on a par with just guessing.
## Main takeaways
* **Transfer learning** often allows you to get good results with a relatively small amount of custom data.
* Knowing the power of transfer learning, it's a good idea to ask at the start of every problem, "does an existing well-performing model exist for my problem?"
* When using a pretrained model, it's important that your custom data be formatted/preprocessed in the same way that the original model was trained on, otherwise you may get degraded performance.
* The same goes for predicting on custom data, ensure your custom data is in the same format as the data your model was trained on.
* There are [several different places to find pretrained models](https://www.learnpytorch.io/06_pytorch_transfer_learning/#where-to-find-pretrained-models) from the PyTorch domain libraries, HuggingFace Hub and libraries such as `timm` (PyTorch Image Models).
## Exercises
All of the exercises are focused on practicing the code above.
You should be able to complete them by referencing each section or by following the resource(s) linked.
All exercises should be completed using [device-agnostic code](https://pytorch.org/docs/stable/notes/cuda.html#device-agnostic-code).
**Resources:**
* [Exercise template notebook for 06](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/exercises/06_pytorch_transfer_learning_exercises.ipynb)
* [Example solutions notebook for 06](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/solutions/06_pytorch_transfer_learning_exercise_solutions.ipynb) (try the exercises *before* looking at this)
* See a live [video walkthrough of the solutions on YouTube](https://youtu.be/ueLolShyFqs) (errors and all)
1. Make predictions on the entire test dataset and plot a confusion matrix for the results of our model compared to the truth labels. Check out [03. PyTorch Computer Vision section 10](https://www.learnpytorch.io/03_pytorch_computer_vision/#10-making-a-confusion-matrix-for-further-prediction-evaluation) for ideas.
2. Get the "most wrong" of the predictions on the test dataset and plot the 5 "most wrong" images. You can do this by:
* Predicting across all of the test dataset, storing the labels and predicted probabilities.
    * Sort the predictions by *wrong prediction* and then *descending predicted probabilities*; this will give you the wrong predictions with the *highest* prediction probabilities, in other words, the "most wrong" (a hedged sketch of this workflow is included after this exercise list).
* Plot the top 5 "most wrong" images, why do you think the model got these wrong?
3. Predict on your own image of pizza/steak/sushi - how does the model go? What happens if you predict on an image that isn't pizza/steak/sushi?
4. Train the model from section 4 above for longer (10 epochs should do), what happens to the performance?
5. Train the model from section 4 above with more data, say 20% of the images from Food101 of Pizza, Steak and Sushi images.
* You can find the [20% Pizza, Steak, Sushi dataset](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/data/pizza_steak_sushi_20_percent.zip) on the course GitHub. It was created with the notebook [`extras/04_custom_data_creation.ipynb`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/04_custom_data_creation.ipynb).
6. Try a different model from [`torchvision.models`](https://pytorch.org/vision/stable/models.html) on the Pizza, Steak, Sushi data, how does this model perform?
* You'll have to change the size of the classifier layer to suit our problem.
* You may want to try an EfficientNet with a higher number than our B0, perhaps `torchvision.models.efficientnet_b2()`?
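For exercise 2, a minimal sketch of the "most wrong" workflow could look like the following. This is not the official solution (see the solutions notebook linked above); it assumes the trained `model`, `test_dataloader` and `device` from the sections above are still in scope, and the `get_most_wrong()` helper name is made up for illustration.
```
import pandas as pd
import torch

def get_most_wrong(model, dataloader, device, k=5):
    """Collect predictions across a dataloader and return the k wrong
    predictions made with the highest prediction probability."""
    rows = []
    model.eval()
    with torch.inference_mode():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred_probs = torch.softmax(model(X), dim=1)  # logits -> prediction probabilities
            pred_labels = pred_probs.argmax(dim=1)       # prediction probabilities -> labels
            for true_label, pred_label, prob in zip(y, pred_labels, pred_probs.max(dim=1).values):
                rows.append({"true": true_label.item(),
                             "pred": pred_label.item(),
                             "pred_prob": prob.item()})
    results_df = pd.DataFrame(rows)
    # Keep only the wrong predictions, then sort by descending prediction probability
    wrong_df = results_df[results_df["true"] != results_df["pred"]]
    return wrong_df.sort_values("pred_prob", ascending=False).head(k)

# Example usage:
# most_wrong = get_most_wrong(model, test_dataloader, device, k=5)
# most_wrong
```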
## Extra-curriculum
* Look up what "model fine-tuning" is and spend 30 minutes researching different methods to perform it with PyTorch. How would we change our code to fine-tune? Tip: fine-tuning usually works best if you have *lots* of custom data, whereas feature extraction is typically better if you have less custom data.
* Check out the new/upcoming [PyTorch multi-weights API](https://pytorch.org/blog/introducing-torchvision-new-multi-weight-support-api/) (still in beta at time of writing, May 2022), it's a new way to perform transfer learning in PyTorch. What changes to our code would need to be made to use the new API?
* Try to create your own classifier on two classes of images, for example, you could collect 10 photos of your dog and your friend's dog and train a model to classify the two dogs. This would be a good way to practice creating a dataset as well as building a model on that dataset.
|
github_jupyter
|
import matplotlib.pyplot as plt
import torch
import torchvision
from torch import nn
from torchvision import transforms
# Try to get torchinfo, install it if it doesn't work
try:
from torchinfo import summary
except:
print("[INFO] Couldn't find torchinfo... installing it.")
!pip install -q torchinfo
from torchinfo import summary
# Try to import the going_modular directory, download it from GitHub if it doesn't work
try:
from going_modular.going_modular import data_setup, engine
except:
# Get the going_modular scripts
print("[INFO] Couldn't find going_modular scripts... downloading them from GitHub.")
!git clone https://github.com/mrdbourke/pytorch-deep-learning
!mv pytorch-deep-learning/going_modular .
!rm -rf pytorch-deep-learning
from going_modular.going_modular import data_setup, engine
# Setup device agnostic code
device = "cuda" if torch.cuda.is_available() else "cpu"
device
import os
import zipfile
from pathlib import Path
import requests
# Setup path to data folder
data_path = Path("data/")
image_path = data_path / "pizza_steak_sushi"
# If the image folder doesn't exist, download it and prepare it...
if image_path.is_dir():
print(f"{image_path} directory exists.")
else:
print(f"Did not find {image_path} directory, creating one...")
image_path.mkdir(parents=True, exist_ok=True)
# Download pizza, steak, sushi data
with open(data_path / "pizza_steak_sushi.zip", "wb") as f:
request = requests.get("https://github.com/mrdbourke/pytorch-deep-learning/raw/main/data/pizza_steak_sushi.zip")
print("Downloading pizza, steak, sushi data...")
f.write(request.content)
# Unzip pizza, steak, sushi data
with zipfile.ZipFile(data_path / "pizza_steak_sushi.zip", "r") as zip_ref:
print("Unzipping pizza, steak, sushi data...")
zip_ref.extractall(image_path)
# Remove .zip file
os.remove(data_path / "pizza_steak_sushi.zip")
# Setup Dirs
train_dir = image_path / "train"
test_dir = image_path / "test"
> normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
> std=[0.229, 0.224, 0.225])
> ```
The good news is, we can achieve the above transformations with a combination of:
| **Transform number** | **Transform required** | **Code to perform transform** |
| ----- | ----- | ----- |
| 1 | Mini-batches of size `[batch_size, 3, height, width]` where height and width are at least 224x224^. | `torchvision.transforms.Resize()` to resize images into `[3, 224, 224]`^ and `torch.utils.data.DataLoader()` to create batches of images. |
| 2 | Values between 0 & 1. | `torchvision.transforms.ToTensor()` |
| 3 | A mean of `[0.485, 0.456, 0.406]` (values across each colour channel). | `torchvision.transforms.Normalize(mean=...)` to adjust the mean of our images. |
| 4 | A standard deviation of `[0.229, 0.224, 0.225]` (values across each colour channel). | `torchvision.transforms.Normalize(std=...)` to adjust the standard deviation of our images. |
> **Note:** ^some pretrained models from `torchvision.models` take images in different sizes to `[3, 224, 224]`, for example, some might take them in `[3, 240, 240]`. For specific input image sizes, see the documentation.
> **Question:** *Where did the mean and standard deviation values come from? Why do we need to do this?*
>
> These were calculated from the data. Specifically, from the ImageNet dataset, by taking the means and standard deviations across a subset of images.
>
> We also don't *need* to do this. Neural networks are usually quite capable of figuring out appropriate data distributions (they'll calculate where the mean and standard deviations need to be on their own) but setting them at the start can help our networks achieve better performance quicker.
Let's compose a series of `torchvision.transforms` to perform the above steps.
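The original code cell isn't reproduced here, so treat the following as a hedged sketch of that pipeline; the `simple_transform` name matches the one referenced below.
```
# A sketch of the transform pipeline described in the table above
from torchvision import transforms

simple_transform = transforms.Compose([
    transforms.Resize((224, 224)),                    # 1. Reshape all images to 224x224
    transforms.ToTensor(),                            # 2. Turn image values into the range 0 & 1
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # 3. A mean across each colour channel
                         std=[0.229, 0.224, 0.225])   # 4. A standard deviation across each colour channel
])
```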
Wonderful!
Now we've got a series of transforms ready to prepare our images, let's create training and testing DataLoaders.
We can create these using the `create_dataloaders` function from the [`data_setup.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/going_modular/going_modular/data_setup.py) script we created in [05. PyTorch Going Modular Part 2](https://www.learnpytorch.io/05_pytorch_going_modular/#2-create-datasets-and-dataloaders-data_setuppy).
We'll set `batch_size=32` so our model sees mini-batches of 32 samples at a time.
And we can transform our images using the transform pipeline we created above by setting `transform=simple_transform`.
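A sketch of that call is below; the argument names assume the `create_dataloaders()` signature from the `going_modular` scripts, so double-check against `data_setup.py` if they differ.
```
# Create training and testing DataLoaders as well as get a list of class names
train_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(
    train_dir=train_dir,
    test_dir=test_dir,
    transform=simple_transform,  # resize, convert images to values between 0 & 1 and normalize them
    batch_size=32                # set mini-batch size to 32
)
```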
## 3. Getting a pretrained model
Alright, here comes the fun part!
Over the past few notebooks we've been building PyTorch neural networks from scratch.
And while that's a good skill to have, our models haven't been performing as well as we'd like.
That's where **transfer learning** comes in.
The whole idea of transfer learning is to **take an already well-performing model on a problem-space similar to yours and then customising it to your use case**.
Since we're working on a computer vision problem (image classification with FoodVision Mini), we can find pretrained classification models in [`torchvision.models`](https://pytorch.org/vision/stable/models.html#classification).
Exploring the documentation, you'll find plenty of common computer vision architecture backbones such as:
| **Architecture backbone** | **Code** |
| ----- | ----- |
| [ResNet](https://arxiv.org/abs/1512.03385)'s | `torchvision.models.resnet18()`, `torchvision.models.resnet50()`... |
| [VGG](https://arxiv.org/abs/1409.1556) (similar to what we used for TinyVGG) | `torchvision.models.vgg16()` |
| [EfficientNet](https://arxiv.org/abs/1905.11946)'s | `torchvision.models.efficientnet_b0()`, `torchvision.models.efficientnet_b1()`... |
| [VisionTransformer](https://arxiv.org/abs/2010.11929) (ViT's)| `torchvision.models.vit_b_16()`, `torchvision.models.vit_b_32()`... |
| [ConvNeXt](https://arxiv.org/abs/2201.03545) | `torchvision.models.convnext_tiny()`, `torchvision.models.convnext_small()`... |
| More available in `torchvision.models` | `torchvision.models...` |
### 3.1 Which pretrained model should you use?
It depends on your problem/the device you're working with.
Generally, the higher number in the model name (e.g. `efficientnet_b0()` -> `efficientnet_b1()` -> `efficientnet_b7()`) means *better performance* but a *larger* model.
You might think better performance is *always better*, right?
That's true but **some better performing models are too big for some devices**.
For example, say you'd like to run your model on a mobile-device, you'll have to take into account the limited compute resources on the device, thus you'd be looking for a smaller model.
But if you've got unlimited compute power, as [*The Bitter Lesson*](http://www.incompleteideas.net/IncIdeas/BitterLesson.html) states, you'd likely take the biggest, most compute hungry model you can.
Understanding this **performance vs. speed vs. size tradeoff** will come with time and practice.
For me, I've found a nice balance in the `efficientnet_bX` models.
As of May 2022, [Nutrify](https://nutrify.app) (the machine learning powered app I'm working on) is powered by an `efficientnet_b0`.
[Comma.ai](https://comma.ai/) (a company that makes open source self-driving car software) [uses an `efficientnet_b2`](https://geohot.github.io/blog/jekyll/update/2021/10/29/an-architecture-for-life.html) to learn a representation of the road.
> **Note:** Even though we're using `efficientnet_bX`, it's important not to get too attached to any one architecture, as they are always changing as new research gets released. Best to experiment, experiment, experiment and see what works for your problem.
### 3.2 Setting up a pretrained model
The pretrained model we're going to be using is [`torchvision.models.efficientnet_b0()`](https://pytorch.org/vision/stable/generated/torchvision.models.efficientnet_b0.html#torchvision.models.efficientnet_b0).
The architecture is from the paper *[EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)*.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-effnet-b0-feature-extractor.png" alt="efficienet_b0 from PyTorch torchvision feature extraction model" width=900/>
*Example of what we're going to create, a pretrained [EfficientNetB0 model](https://ai.googleblog.com/2019/05/efficientnet-improving-accuracy-and.html) from `torchvision.models` with the output layer adjusted for our use case of classifying pizza, steak and sushi images.*
To setup the model and make sure it comes with pretrained weights, we can set `pretrained=True`, doing so will download a set of model weights learned from the ImageNet dataset.
This means the model has already been trained on millions of images and has a good base representation of image data.
The PyTorch version of this pretrained model is capable of achieving ~77.7% accuracy across ImageNet's 1000 classes.
> **Note:** As of December 2021, the PyTorch team announced a [new multi-weight API for TorchVision](https://pytorch.org/blog/introducing-torchvision-new-multi-weight-support-api/), allowing different sets of model weights (e.g. weights learned from different datasets) to be used for pretrained models. Though as of May 2022, this new API is not released. But even with the new API, the idea still remains: take the weights/patterns from one model and apply it to your own.
We'll also send it to the target device.
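A one-line sketch of that setup (using the `pretrained=True` flag described above; newer torchvision versions use a `weights` argument instead):
```
# Setup the model with pretrained weights and send it to the target device
model = torchvision.models.efficientnet_b0(pretrained=True).to(device)
```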
If we print the model, we get something similar to the following:
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-torchvision-effnet-print-out.png" alt="output of printing the efficientnet_b0 model from torchvision.models" width=900/>
Lots and lots and lots of layers.
This is one of the benefits of transfer learning, taking an existing model that's been crafted by some of the best engineers in the world and applying it to your own problem.
Our `efficientnet_b0` comes in three main parts:
1. `features` - A collection of convolutional layers and other various activation layers to learn a base representation of vision data (this base representation/collection of layers is often referred to as **features** or **feature extractor**, "the base layers of the model learn the different **features** of images").
2. `avgpool` - Takes the average of the output of the `features` layer(s) and turns it into a **feature vector**.
3. `classifier` - Turns the **feature vector** into a vector with the same dimensionality as the number of required output classes (since `efficientnet_b0` is pretrained on ImageNet and because ImageNet has 1000 classes, `out_features=1000` is the default).
### 3.3 Getting a summary of our model with `torchinfo.summary()`
To learn more about our model, let's use `torchinfo`'s [`summary()` method](https://github.com/TylerYep/torchinfo#documentation).
To do so, we'll pass in:
* `model` - the model we'd like to get a summary of.
* `input_size` - the shape of the data we'd like to pass to our model, for the case of `efficientnet_b0`, the input size is `(batch_size, 3, 224, 224)`, though [other variants of `efficientnet_bX` have different input sizes](https://github.com/pytorch/vision/blob/d2bfd639e46e1c5dc3c177f889dc7750c8d137c7/references/classification/train.py#L92-L93).
* **Note:** Many modern models can handle input images of varying sizes thanks to [`torch.nn.AdaptiveAvgPool2d()`](https://pytorch.org/docs/stable/generated/torch.nn.AdaptiveAvgPool2d.html), this layer adaptively adjusts the `output_size` of a given input as required. You can try this out by passing different size input images to `summary()` or your models.
* `col_names` - the various information columns we'd like to see about our model.
* `col_width` - how wide the columns should be for the summary.
* `row_settings` - what features to show in a row.
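Put together, the call might look like the following (a sketch assuming the `model` and the `summary` import from earlier):
```
# Print a summary of the model using torchinfo
summary(model=model,
        input_size=(32, 3, 224, 224), # (batch_size, color_channels, height, width)
        col_names=["input_size", "output_size", "num_params", "trainable"],
        col_width=20,
        row_settings=["var_names"])
```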
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-torchinfo-summary-unfrozen-layers.png" alt="output of torchinfo.summary() when passed our model with all layers as trainable" width=900/>
Woah!
Now that's a big model!
From the output of the summary, we can see all of the various input and output shape changes as our image data goes through the model.
And there are a whole bunch more total parameters (pretrained weights) to recognize different patterns in our data.
For reference, our model from previous sections, **TinyVGG had 8,083 parameters vs. 5,288,548 parameters for `efficientnet_b0`, an increase of ~654x**!
What do you think, will this mean better performance?
### 3.4 Freezing the base model and changing the output layer to suit our needs
The process of transfer learning usually goes: freeze some base layers of a pretrained model (typically the `features` section) and then adjust the output layers (also called head/classifier layers) to suit your needs.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-effnet-changing-the-classifier-head.png" alt="changing the efficientnet classifier head to a custom number of outputs" width=900/>
*You can customise the outputs of a pretrained model by changing the output layer(s) to suit your problem. The original `torchvision.models.efficientnet_b0()` comes with `out_features=1000` because there are 1000 classes in ImageNet, the dataset it was trained on. However, for our problem, classifying images of pizza, steak and sushi we only need `out_features=3`.*
Let's freeze all of the layers/parameters in the `features` section of our `efficientnet_b0` model.
> **Note:** To *freeze* layers means to keep them how they are during training. For example, if your model has pretrained layers, to *freeze* them would be to say, "don't change any of the patterns in these layers during training, keep them how they are." In essence, we'd like to keep the pretrained weights/patterns our model has learned from ImageNet as a backbone and then only change the output layers.
We can freeze all of the layers/parameters in the `features` section by setting the attribute `requires_grad=False`.
For parameters with `requires_grad=False`, PyTorch doesn't track gradient updates and in turn, these parameters won't be changed by our optimizer during training.
In essence, a parameter with `requires_grad=False` is "untrainable" or "frozen" in place.
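In code, freezing the `features` section looks something like this (a sketch assuming the `model` from above):
```
# Freeze all base layers in the "features" section of the model (the feature extractor)
for param in model.features.parameters():
    param.requires_grad = False
```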
Feature extractor layers frozen!
Let's now adjust the output layer or the `classifier` portion of our pretrained model to our needs.
Right now our pretrained model has `out_features=1000` because there are 1000 classes in ImageNet.
However, we don't have 1000 classes, we only have three, pizza, steak and sushi.
We can change the `classifier` portion of our model by creating a new series of layers.
The current `classifier` consists of:
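The cell showing it isn't reproduced here, but printing `model.classifier` for torchvision's `efficientnet_b0` gives a head along these lines:
```
# Inspect the current classifier head
print(model.classifier)
# Sequential(
#   (0): Dropout(p=0.2, inplace=True)
#   (1): Linear(in_features=1280, out_features=1000, bias=True)
# )
```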
We'll keep the `Dropout` layer the same using [`torch.nn.Dropout(p=0.2, inplace=True)`](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html).
> **Note:** [Dropout layers](https://developers.google.com/machine-learning/glossary#dropout_regularization) randomly remove connections between two neural network layers with a probability of `p`. For example, if `p=0.2`, 20% of connections between neural network layers will be removed at random each pass. This practice is meant to help regularize (prevent overfitting) a model by making sure the connections that remain learn features to compensate for the removal of the other connections (hopefully these remaining features are *more general*).
And we'll keep `in_features=1280` for our `Linear` output layer but we'll change the `out_features` value to the length of our `class_names` (`len(['pizza', 'steak', 'sushi']) = 3`).
Our new `classifier` layer should be on the same device as our `model`.
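A sketch of the replacement head (assuming the `class_names` list from the DataLoader step, which has length 3 here):
```
# Recreate the classifier head for our number of classes and put it on the target device
from torch import nn

model.classifier = nn.Sequential(
    nn.Dropout(p=0.2, inplace=True),            # keep the same Dropout layer
    nn.Linear(in_features=1280,                 # keep the same number of input features
              out_features=len(class_names))    # change the output features to our number of classes
).to(device)
```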
Nice!
Output layer updated, let's get another summary of our model and see what's changed.
<img src="https://raw.githubusercontent.com/mrdbourke/pytorch-deep-learning/main/images/06-torchinfo-summary-frozen-layers.png" alt="output of torchinfo.summary() after freezing multiple layers in our model and changing the classifier head" width=900/>
Ho, ho! There's a fair few changes here!
Let's go through them:
* **Trainable column** - You'll see that many of the base layers (the ones in the `features` portion) have their Trainable value as `False`. This is because we set their attribute `requires_grad=False`. Unless we change this, these layers won't be updated during future training.
* **Output shape of `classifier`** - The `classifier` portion of the model now has an Output Shape value of `[32, 3]` instead of `[32, 1000]`. Its Trainable value is also `True`. This means its parameters will be updated during training. In essence, we're using the `features` portion to feed our `classifier` portion a base representation of an image and then our `classifier` layer is going to learn how that base representation aligns with our problem.
* **Less trainable parameters** - Previously there were 5,288,548 trainable parameters. But since we froze many of the layers of the model and only left the `classifier` as trainable, there are now only 3,843 trainable parameters (even fewer than our TinyVGG model). Though there are also 4,007,548 non-trainable parameters, these will create a base representation of our input images to feed into our `classifier` layer.
> **Note:** The more trainable parameters a model has, the more compute power it needs and the longer it takes to train. Freezing the base layers of our model and leaving it with fewer trainable parameters means our model should train quite quickly. This is one huge benefit of transfer learning: taking the already learned parameters of a model trained on a problem similar to yours and only tweaking the outputs slightly to suit your problem.
## 4. Train model
Now we've got a pretrained model that's semi-frozen and has a customised `classifier`, how about we see transfer learning in action?
To begin training, let's create a loss function and an optimizer.
Because we're still working with multi-class classification, we'll use `nn.CrossEntropyLoss()` for the loss function.
And we'll stick with `torch.optim.Adam()` as our optimizer with `lr=0.001`.
Wonderful!
To train our model, we can use the `train()` function we defined in the [05. PyTorch Going Modular section 04](https://www.learnpytorch.io/05_pytorch_going_modular/#4-creating-train_step-and-test_step-functions-and-train-to-combine-them).
The `train()` function is in the [`engine.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/going_modular/going_modular/engine.py) script inside the [`going_modular` directory](https://github.com/mrdbourke/pytorch-deep-learning/tree/main/going_modular/going_modular).
Let's see how long it takes to train our model for 5 epochs.
> **Note:** We're only going to be training the parameters `classifier` here as all of the other parameters in our model have been frozen.
Wow!
Our model trained quite fast (~5 seconds on my local machine with a [NVIDIA TITAN RTX GPU](https://www.nvidia.com/en-au/deep-learning-ai/products/titan-rtx/)/about 15 seconds on Google Colab with a [NVIDIA P100 GPU](https://www.nvidia.com/en-au/data-center/tesla-p100/)).
And it looks like it smashed our previous model results out of the park!
With an `efficientnet_b0` backbone, our model achieves almost 90% accuracy on the test dataset, almost *double* what we were able to achieve with TinyVGG.
Not bad for a model we downloaded with a few lines of code.
## 5. Evaluate model by plotting loss curves
Our model looks like it's performing pretty well.
Let's plot its loss curves to see what the training looks like over time.
We can plot the loss curves using the function `plot_loss_curves()` we created in [04. PyTorch Custom Datasets section 7.8](https://www.learnpytorch.io/04_pytorch_custom_datasets/#78-plot-the-loss-curves-of-model-0).
The function is stored in the [`helper_functions.py`](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/helper_functions.py) script so we'll try to import it and download the script if we don't have it.
Those are some excellent looking loss curves!
It looks like the loss for both datasets (train and test) is heading in the right direction.
The same with the accuracy values, trending upwards.
That goes to show the power of **transfer learning**. Using a pretrained model often leads to pretty good results with a small amount of data in less time.
I wonder what would happen if you tried to train the model for longer? Or if we added more data?
> **Question:** Looking at the loss curves, does our model look like it's overfitting or underfitting? Or perhaps neither? Hint: Check out notebook [04. PyTorch Custom Datasets part 8. What should an ideal loss curve look like?](https://www.learnpytorch.io/04_pytorch_custom_datasets/#8-what-should-an-ideal-loss-curve-look-like) for ideas.
## 6. Make predictions on images from the test set
It looks like our model performs well quantitatively but how about qualitatively?
Let's find out by making some predictions with our model on images from the test set (these aren't seen during training) and plotting them.
*Visualize, visualize, visualize!*
One thing we'll have to remember is that for our model to make predictions on an image, the image has to be in the *same* format as the images our model was trained on.
This means we'll need to make sure our images have:
* **Same shape** - If our images are different shapes to what our model was trained on, we'll get shape errors.
* **Same datatype** - If our images are a different datatype (e.g. `torch.int8` vs. `torch.float32`) we'll get datatype errors.
* **Same device** - If our images are on a different device to our model, we'll get device errors.
* **Same transformations** - If our model was trained on images that have been transformed in a certain way (e.g. normalized with a specific mean and standard deviation) and we try to make predictions on images transformed in a different way, these predictions may be off.
> **Note:** These requirements go for all kinds of data if you're trying to make predictions with a trained model. Data you'd like to predict on should be in the same format as your model was trained on.
To do all of this, we'll create a function `pred_and_plot_image()` to:
1. Take in a trained model, a list of class names, a filepath to a target image, an image size, a transform and a target device.
2. Open an image with [`PIL.Image.open()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open).
3. Create a transform for the image (this will default to the same transform we used on our images to train our model).
4. Make sure the model is on the target device.
5. Turn on model eval mode with `model.eval()` (this turns off layers like `nn.Dropout()`, so they aren't used for inference) and the inference mode context manager.
6. Transform the target image with the transform made in step 3 and add an extra batch dimension with `torch.unsqueeze(dim=0)` so our input image has shape `[batch_size, color_channels, height, width]`.
7. Make a prediction on the image by passing it to the model ensuring it's on the target device.
8. Convert the model's output logits to prediction probabilities with `torch.softmax()`.
9. Convert model's prediction probabilities to prediction labels with `torch.argmax()`.
10. Plot the image with `matplotlib` and set the title to the prediction label from step 9 and prediction probability from step 8.
> **Note:** This is a similar function to [04. PyTorch Custom Datasets section 11.3's](https://www.learnpytorch.io/04_pytorch_custom_datasets/#113-putting-custom-image-prediction-together-building-a-function) `pred_and_plot_image()` with a few tweaked steps.
What a good looking function!
Let's test it out by making predictions on a few random images from the test set.
We can get a list of all the test image paths using `list(Path(test_dir).glob("*/*.jpg"))`, the stars in the `glob()` method say "any file matching this pattern", in other words, any file ending in `.jpg` (all of our images).
And then we can randomly sample a number of these using Python's [`random.sample(population, k)`](https://docs.python.org/3/library/random.html#random.sample) where `population` is the sequence to sample and `k` is the number of samples to retrieve.
Woohoo!
Those predictions look far better than the ones our TinyVGG model was previously making.
### 6.1 Making predictions on a custom image
It looks like our model does well qualitatively on data from the test set.
But how about on our own custom image?
That's where the real fun of machine learning is!
Predicting on your own custom data, outside of any training or test set.
To test our model on a custom image, let's import the old faithful `pizza-dad.jpeg` image (an image of my dad eating pizza).
We'll then pass it to the `pred_and_plot_image()` function we created above and see what happens.
| 0.839537 | 0.984155 |
```
import data_io
import pandas as pd
import numpy as np
import regex as re
import utils as u
```
Read in manually coded datasets
```
schools = ['Princeton', 'Harvard', 'Yale', 'Columbia','MIT']
dfs = {}
for s in schools:
dfs[s] = pd.read_excel(data_io.DATA+f"{s.lower()}_data_2019_formatted_coded.xlsx")
for k in dfs.keys():
print(len(dfs[k]))
dfs[k] = dfs[k].loc[1:,:]
print(len(dfs[k]))
image_cols = ['article_id','first_researcher_pictured',
'num_male_researchers_pictured', 'num_female_researchers_pictured',
'num_other_researchers_pictured', 'num_women_pictured',
'num_men_pictured', 'num_other_people_pictured']
article_text_cols = ['article_id','gender_first_researcher', 'num_women_mentioned',
'num_men_mentioned', 'num_others_mentioned']
def check_completeness_text(df):
art_df = df.copy()
    # drop rows without an article id, then keep one row per article
    no_dups = art_df.dropna(subset = ['article_id'])
    no_dups = no_dups.drop_duplicates(subset = ['article_id'], keep = 'first')
no_gender = no_dups[pd.isnull(no_dups['gender_first_researcher'])]
return no_gender
def check_completeness_photos(df):
art_df = df.copy()
images_only = art_df[image_cols]
no_gender = images_only[pd.isnull(images_only['first_researcher_pictured'])]
return no_gender
incomplete_text = {}
incomplete_photos = {}
for k in dfs.keys():
dfs[k] = dfs[k].reset_index(drop = True)
temp = check_completeness_text(dfs[k])
incomplete_text[k] = temp
print(k, ' number w incomplete text coding: ')
print(len(temp))
temp = check_completeness_photos(dfs[k])
incomplete_photos[k] = temp
print(k, ' number w incomplete image coding: ')
print(len(temp))
```
Look at incomplete articles:
```
yale = dfs["Yale"]
yale[yale['article_id'] == incomplete_text['Yale'].reset_index().loc[0, 'article_id']]
columbia = dfs['Columbia']
#2 pictures didn't save bc they don't render on the webpage
columbia.loc[pd.isnull(columbia['first_researcher_pictured']),:]
def clean_dataset(df_orig, uni = None):
df = df_orig.copy()
row_counts = pd.DataFrame(df['article_id'].value_counts()).reset_index()
if 'new_article_text' in df.columns.to_list():
df = df.drop(columns = ['article_text'])
df = df.rename(columns = {'new_article_text': 'article_text'})
row_counts.columns = ['article_id', 'rows']
row_counts = dict(zip(row_counts['article_id'].to_list(),
row_counts['rows'].to_list()))
df['num_pics'] = df['article_id'].map(row_counts)
gender_dict = {0: 'Unclear',
1: 'Man',
2: 'Woman'}
df['gender_first_researcher_text'] = df['gender_first_researcher'].map(gender_dict)
grpd = df.groupby(['article_id'])
num_first_res_pic = {}
num_fres_pic = {}
num_mres_pic = {}
num_women_pic = {}
num_men_pic = {}
for name, grp in grpd:
num_first_res_pic[name] = sum(grp['first_researcher_pictured'])
num_fres_pic[name] = sum(grp['num_female_researchers_pictured'])
num_mres_pic[name] = sum(grp['num_male_researchers_pictured'])
num_women_pic[name] = sum(grp['num_women_pictured'])
num_men_pic[name] = sum(grp['num_men_pictured'])
df['num_men_mentioned'] = df['num_men_mentioned'].astype(float)
df['num_women_mentioned'] = df['num_women_mentioned'].astype(float)
df['num_first_res_pics'] = df['article_id'].map(num_first_res_pic).astype(int)
df['num_female_res_pics'] = df['article_id'].map(num_fres_pic).astype(int)
df['num_male_res_pics'] = df['article_id'].map(num_mres_pic).astype(int)
df['total_men_pics'] = df['article_id'].map(num_men_pic).astype(int)
df['total_women_pics'] = df['article_id'].map(num_women_pic).astype(int)
df['total_people_mentioned'] = (df['num_women_mentioned'] + df['num_men_mentioned'] +
df['num_others_mentioned'])
if uni:
df['uni'] = uni
return df
```
Remove articles that failed to scrape
```
yale = dfs['Yale']
#One article failed to scrape, remove it
yale = yale[yale['article_id'] != incomplete_text['Yale'].reset_index().loc[0, 'article_id']]
columbia = dfs['Columbia']
#2 pictures didn't save bc they don't render on the webpage
columbia = columbia.dropna(subset=['first_researcher_pictured'])
columbia = columbia.reset_index(drop =True)
yale = clean_dataset(yale, uni = 'yale')
columbia = clean_dataset(columbia, uni = 'columbia')
mit = clean_dataset(dfs['MIT'], uni = 'mit')
princeton = clean_dataset(dfs['Princeton'], uni = 'princeton')
harvard = clean_dataset(dfs['Harvard'], uni = 'harvard')
df = pd.concat([mit, princeton, yale, harvard, columbia], ignore_index=True)
df.dropna(subset=['article_text'])['num_men_mentioned'].isnull().sum()
tmp = df.dropna(subset=['article_link', 'gender_first_researcher_text'])
tmp
tmp = tmp.sample(frac = 1)
unclear = tmp[tmp['gender_first_researcher_text'] == 'Unclear']
unclear = unclear.reset_index(drop = True)
for i in unclear.index:
#These articles don't mention any researchers
print(unclear.loc[i, 'article_link'])
#drop the articles that don't mention any researchers
tmp = tmp[tmp['gender_first_researcher_text']!='Unclear']
tmp
tmp.uni.value_counts()
tmp.gender_first_researcher_text.value_counts()
pd.crosstab(tmp.gender_first_researcher_text, tmp.uni, margins = True).T
tmp.columns
keep_cols = ['article_id', 'article_title', 'article_link',
'article_date', 'gender_first_researcher',
'gender_first_researcher_text',
'uni', 'article_text']
keep_cols.extend([c for c in tmp.columns.to_list() if '_mentioned' in c or '_pictured' in c or '_pics' in c])
keep_cols
tmp_condensed = tmp[keep_cols]
tmp_condensed.to_csv(f"{data_io.DATA}article_data_cleaned.csv", index = False, encoding = 'utf-8-sig')
```
|
github_jupyter
|
import data_io
import pandas as pd
import numpy as np
import regex as re
import utils as u
schools = ['Princeton', 'Harvard', 'Yale', 'Columbia','MIT']
dfs = {}
for s in schools:
dfs[s] = pd.read_excel(data_io.DATA+f"{s.lower()}_data_2019_formatted_coded.xlsx")
for k in dfs.keys():
print(len(dfs[k]))
dfs[k] = dfs[k].loc[1:,:]
print(len(dfs[k]))
image_cols = ['article_id','first_researcher_pictured',
'num_male_researchers_pictured', 'num_female_researchers_pictured',
'num_other_researchers_pictured', 'num_women_pictured',
'num_men_pictured', 'num_other_people_pictured']
article_text_cols = ['article_id','gender_first_researcher', 'num_women_mentioned',
'num_men_mentioned', 'num_others_mentioned']
def check_completeness_text(df):
art_df = df.copy()
    # drop rows without an article id, then keep one row per article
    no_dups = art_df.dropna(subset = ['article_id'])
    no_dups = no_dups.drop_duplicates(subset = ['article_id'], keep = 'first')
no_gender = no_dups[pd.isnull(no_dups['gender_first_researcher'])]
return no_gender
def check_completeness_photos(df):
art_df = df.copy()
images_only = art_df[image_cols]
no_gender = images_only[pd.isnull(images_only['first_researcher_pictured'])]
return no_gender
incomplete_text = {}
incomplete_photos = {}
for k in dfs.keys():
dfs[k] = dfs[k].reset_index(drop = True)
temp = check_completeness_text(dfs[k])
incomplete_text[k] = temp
print(k, ' number w incomplete text coding: ')
print(len(temp))
temp = check_completeness_photos(dfs[k])
incomplete_photos[k] = temp
print(k, ' number w incomplete image coding: ')
print(len(temp))
yale = dfs["Yale"]
yale[yale['article_id'] == incomplete_text['Yale'].reset_index().loc[0, 'article_id']]
columbia = dfs['Columbia']
#2 pictures didn't save bc they don't render on the webpage
columbia.loc[pd.isnull(columbia['first_researcher_pictured']),:]
def clean_dataset(df_orig, uni = None):
df = df_orig.copy()
row_counts = pd.DataFrame(df['article_id'].value_counts()).reset_index()
if 'new_article_text' in df.columns.to_list():
df = df.drop(columns = ['article_text'])
df = df.rename(columns = {'new_article_text': 'article_text'})
row_counts.columns = ['article_id', 'rows']
row_counts = dict(zip(row_counts['article_id'].to_list(),
row_counts['rows'].to_list()))
df['num_pics'] = df['article_id'].map(row_counts)
gender_dict = {0: 'Unclear',
1: 'Man',
2: 'Woman'}
df['gender_first_researcher_text'] = df['gender_first_researcher'].map(gender_dict)
grpd = df.groupby(['article_id'])
num_first_res_pic = {}
num_fres_pic = {}
num_mres_pic = {}
num_women_pic = {}
num_men_pic = {}
for name, grp in grpd:
num_first_res_pic[name] = sum(grp['first_researcher_pictured'])
num_fres_pic[name] = sum(grp['num_female_researchers_pictured'])
num_mres_pic[name] = sum(grp['num_male_researchers_pictured'])
num_women_pic[name] = sum(grp['num_women_pictured'])
num_men_pic[name] = sum(grp['num_men_pictured'])
df['num_men_mentioned'] = df['num_men_mentioned'].astype(float)
df['num_women_mentioned'] = df['num_women_mentioned'].astype(float)
df['num_first_res_pics'] = df['article_id'].map(num_first_res_pic).astype(int)
df['num_female_res_pics'] = df['article_id'].map(num_fres_pic).astype(int)
df['num_male_res_pics'] = df['article_id'].map(num_mres_pic).astype(int)
df['total_men_pics'] = df['article_id'].map(num_men_pic).astype(int)
df['total_women_pics'] = df['article_id'].map(num_women_pic).astype(int)
df['total_people_mentioned'] = (df['num_women_mentioned'] + df['num_men_mentioned'] +
df['num_others_mentioned'])
if uni:
df['uni'] = uni
return df
yale = dfs['Yale']
#One article failed to scrape, remove it
yale = yale[yale['article_id'] != incomplete_text['Yale'].reset_index().loc[0, 'article_id']]
columbia = dfs['Columbia']
#2 pictures didn't save bc they don't render on the webpage
columbia = columbia.dropna(subset=['first_researcher_pictured'])
columbia = columbia.reset_index(drop =True)
yale = clean_dataset(yale, uni = 'yale')
columbia = clean_dataset(columbia, uni = 'columbia')
mit = clean_dataset(dfs['MIT'], uni = 'mit')
princeton = clean_dataset(dfs['Princeton'], uni = 'princeton')
harvard = clean_dataset(dfs['Harvard'], uni = 'harvard')
df = pd.concat([mit, princeton, yale, harvard, columbia], ignore_index=True)
df.dropna(subset=['article_text'])['num_men_mentioned'].isnull().sum()
tmp = df.dropna(subset=['article_link', 'gender_first_researcher_text'])
tmp
tmp = tmp.sample(frac = 1)
unclear = tmp[tmp['gender_first_researcher_text'] == 'Unclear']
unclear = unclear.reset_index(drop = True)
for i in unclear.index:
#These articles don't mention any researchers
print(unclear.loc[i, 'article_link'])
#drop the articles that don't mention any researchers
tmp = tmp[tmp['gender_first_researcher_text']!='Unclear']
tmp
tmp.uni.value_counts()
tmp.gender_first_researcher_text.value_counts()
pd.crosstab(tmp.gender_first_researcher_text, tmp.uni, margins = True).T
tmp.columns
keep_cols = ['article_id', 'article_title', 'article_link',
'article_date', 'gender_first_researcher',
'gender_first_researcher_text',
'uni', 'article_text']
keep_cols.extend([c for c in tmp.columns.to_list() if '_mentioned' in c or '_pictured' in c or '_pics' in c])
keep_cols
tmp_condensed = tmp[keep_cols]
tmp_condensed.to_csv(f"{data_io.DATA}article_data_cleaned.csv", index = False, encoding = 'utf-8-sig')
| 0.131828 | 0.660747 |
# Training Against QM Energies and Gradients
This notebook aims to show how the [`descent`](https://github.com/SimonBoothroyd/descent) framework in combination with
[`smirnoffee`](https://github.com/SimonBoothroyd/smirnoffee) can be used to train a set of SMIRNOFF force field bond and
angle force constant parameters against the QM computed energies and associated gradients of a small molecule in
multiple conformers.
For the sake of clarity all warnings will be disabled:
```
import warnings
warnings.filterwarnings('ignore')
import logging
logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
```
### Curating a QC training set
For this example we will be training against QM energies that have been computed and stored within the
[QCArchive](https://qcarchive.molssi.org/), and which are easily retrieved using the [OpenFF QCSubmit](https://github.com/openforcefield/openff-qcsubmit)
package.
We begin by importing the records associated with the `OpenFF Optimization Set 1` optimization data set:
```
from qcportal import FractalClient
from openff.qcsubmit.results import OptimizationResultCollection
result_collection = OptimizationResultCollection.from_server(
client=FractalClient(),
datasets="OpenFF Optimization Set 1"
)
```
which we will then filter to retain a small molecule which will be fast to train on as a demonstration:
```
from openff.qcsubmit.results.filters import ConformerRMSDFilter, SMILESFilter
result_collection = result_collection.filter(
SMILESFilter(smiles_to_include=["CC(=O)NCC1=NC=CN1C"]),
# Only retain conformers with an RMSD greater than 0.5 Å.
ConformerRMSDFilter(max_conformers=10, rmsd_tolerance=0.5)
)
print(f"N Molecules: {result_collection.n_molecules}")
print(f"N Conformers: {result_collection.n_results}")
```
You should see that our filtered collection contains 6 results, which correspond to 6 minimized conformers (and
their associated energies computed using the OpenFF default B3LYP-D3BJ spec) for the molecule we filtered for above.
In order to be able to train our parameters against this data we need to wrap it in a PyTorch dataset object. This
is made trivial thanks to the built-in ``EnergyDataset`` object that ships with the framework. The energy dataset
will extract and store any energy, gradient, and hessian data in a format ready for evaluating a loss function.
We first load in the initial force field parameters ($\theta$) using the [OpenFF Toolkit](https://github.com/openforcefield/openff-toolkit):
```
from openff.toolkit.typing.engines.smirnoff import ForceField
initial_force_field = ForceField("openff_unconstrained-1.0.0.offxml")
```
which we can then use to construct our dataset:
```
from descent.data.energy import EnergyDataset
training_dataset = EnergyDataset.from_optimization_results(
result_collection,
initial_force_field,
# State that we want to include energies and gradients when computing
# the contribution to the loss function.
include_energies=True,
include_gradients=True,
# State that we want to project the gradients along the RICs
gradient_coordinate_system="ric"
)
```
The returned `training_dataset` will contain one entry object per unique molecule in the `result_collection`:
```
len(training_dataset)
```
as we filtered our initial result collection to only contain a single molecule, so too do we only have a single
contribution.
### Defining the 'model'
For this example we will train all the bond and angle force constants that will be assigned to the molecules in our
training set:
```
from descent.utilities.smirnoff import exercised_parameters
parameter_delta_ids = exercised_parameters(
training_dataset,
handlers_to_include=["Bonds", "Angles"],
attributes_to_include=["k"]
)
parameter_delta_ids[::5]
```
These 'ids' are comprised of the type of SMIRNOFF parameter handler that the parameter originated from,
a key containing the id (in this case the SMIRKS pattern) associated with the parameter and the specific
attribute of the parameter (e.g. the force constant ``k``).
These keys will allow us to define the 'model' that will take an already parameterised system stored in an
``Interchange`` object and perturb the parameters based on the current values of the parameters being trained:
```
from descent.models.smirnoff import SMIRNOFFModel
model = SMIRNOFFModel(parameter_delta_ids, initial_force_field)
```
The model stores the values being trained in a single `parameter_delta` tensor, which can be mapped easily back to more meaningful force field parameters.
### Training the force field parameters
We are finally ready to begin training our force field parameters, or more precisely, the delta value that
we should perturb the force field parameters by to reach better agreement with the training data.
For this example we will be training our force field parameters against:
* the relative energies between each conformer with the first conformer of the molecule
* the deviations between the QM and MM gradients projected along the redundant internal coordinates (RIC) of
the molecule.
Here we define the 'boilerplate PyTorch optimization loop':
```
import torch
from descent import metrics, transforms
lr = 0.01
n_epochs = 200
optimizer = torch.optim.Adam([model.parameter_delta], lr=lr)
for epoch in range(n_epochs):
loss = torch.zeros(1)
for entry in training_dataset:
loss += entry.evaluate_loss(
model,
# Specify that we want to use energies relative to the first conformer
# when evaluating the loss function
energy_transforms=transforms.relative(index=0),
# Use the built-in MSE metric when comparing the MM and QM relative
# energies.
energy_metric=metrics.mse(),
# For this example we will use the QM and MM gradients directly when
# computing the loss function.
gradient_transforms=transforms.identity(),
# Use the built-in MSE metric when comparing the MM and QM gradients
gradient_metric=metrics.mse(),
)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if epoch % 20 == 0:
print(f"Epoch {epoch}: loss={loss.item()}")
```
where the only code of note is the ``evaluate_loss`` function that will compute the loss function for us. This function
accepts a number of arguments, but most notable are those that control exactly how the data is transformed (i.e.
compute relative energies) and what form the loss function should take.
We can save our trained parameters back to a SMIRNOFF `.offxml` file for future use:
```
final_force_field = model.to_force_field()
final_force_field.to_file("final.offxml")
```
or print out a summary of the trained values
```
model.summarise(parameter_id_type="id");
```
where here we have chosen to print the unique ID associated with each parameter as opposed to the SMIRKS pattern
(i.e. `parameter_id_type="smirks"`) for improved clarity.
|
github_jupyter
|
import warnings
warnings.filterwarnings('ignore')
import logging
logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
from qcportal import FractalClient
from openff.qcsubmit.results import OptimizationResultCollection
result_collection = OptimizationResultCollection.from_server(
client=FractalClient(),
datasets="OpenFF Optimization Set 1"
)
from openff.qcsubmit.results.filters import ConformerRMSDFilter, SMILESFilter
result_collection = result_collection.filter(
SMILESFilter(smiles_to_include=["CC(=O)NCC1=NC=CN1C"]),
# Only retain conformers with an RMSD greater than 0.5 Å.
ConformerRMSDFilter(max_conformers=10, rmsd_tolerance=0.5)
)
print(f"N Molecules: {result_collection.n_molecules}")
print(f"N Conformers: {result_collection.n_results}")
from openff.toolkit.typing.engines.smirnoff import ForceField
initial_force_field = ForceField("openff_unconstrained-1.0.0.offxml")
from descent.data.energy import EnergyDataset
training_dataset = EnergyDataset.from_optimization_results(
result_collection,
initial_force_field,
# State that we want to include energies and gradients when computing
# the contribution to the loss function.
include_energies=True,
include_gradients=True,
# State that we want to project the gradients along the RICs
gradient_coordinate_system="ric"
)
len(training_dataset)
from descent.utilities.smirnoff import exercised_parameters
parameter_delta_ids = exercised_parameters(
training_dataset,
handlers_to_include=["Bonds", "Angles"],
attributes_to_include=["k"]
)
parameter_delta_ids[::5]
from descent.models.smirnoff import SMIRNOFFModel
model = SMIRNOFFModel(parameter_delta_ids, initial_force_field)
import torch
from descent import metrics, transforms
lr = 0.01
n_epochs = 200
optimizer = torch.optim.Adam([model.parameter_delta], lr=lr)
for epoch in range(n_epochs):
loss = torch.zeros(1)
for entry in training_dataset:
loss += entry.evaluate_loss(
model,
# Specify that we want to use energies relative to the first conformer
# when evaluating the loss function
energy_transforms=transforms.relative(index=0),
# Use the built-in MSE metric when comparing the MM and QM relative
# energies.
energy_metric=metrics.mse(),
# For this example we will use the QM and MM gradients directly when
# computing the loss function.
gradient_transforms=transforms.identity(),
# Use the built-in MSE metric when comparing the MM and QM gradients
gradient_metric=metrics.mse(),
)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if epoch % 20 == 0:
print(f"Epoch {epoch}: loss={loss.item()}")
final_force_field = model.to_force_field()
final_force_field.to_file("final.offxml")
model.summarise(parameter_id_type="id");
| 0.804175 | 0.984215 |