**Create Train / Dev / Test files. <br> Each file is a dictionary where each key is the ID of an author and each value is a dict with the following keys: <br> - author_embedding : the node embedding corresponding to the author (tensor of shape (128,)) <br> - papers_embedding : the abstract embeddings of the author's papers (tensor of shape (10, dim), where dim depends on the embedding model used) <br> - features : the graph structural features (tensor of shape (4,)) <br> - target : the h-index to predict (tensor of shape (1,))**
```
import pandas as pd
import numpy as np
import networkx as nx
from tqdm import tqdm_notebook as tqdm
from sklearn.utils import shuffle
import gzip
import pickle
import torch
def load_dataset_file(filename):
with gzip.open(filename, "rb") as f:
loaded_object = pickle.load(f)
return loaded_object
def save(object, filename, protocol = 0):
"""Saves a compressed object to disk
"""
file = gzip.GzipFile(filename, 'wb')
file.write(pickle.dumps(object, protocol))
file.close()
```
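A quick usage sketch of the two helpers above: they round-trip an arbitrary Python object through a gzip-compressed pickle file (the file name here is illustrative).
```
# Hypothetical round trip: save a small dict, then load it back
example = {'author_42': {'target': 7}}
save(example, 'example.pkl.gz')
restored = load_dataset_file('example.pkl.gz')
assert restored == example
```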
# RoBERTa Embedding
```
# Load the paper's embedding
embedding_per_paper = load_dataset_file('/content/drive/MyDrive/altegrad_datachallenge/files_generated/embedding_per_paper_clean.txt')
# Load the node's embedding
embedding_per_nodes = load_dataset_file('/content/drive/MyDrive/altegrad_datachallenge/files_generated/Node2Vec.txt')
# read the file to create a dictionary with author key and paper list as value
f = open("/content/drive/MyDrive/altegrad_datachallenge/author_papers.txt","r")
papers_per_author = {}
for l in f:
auth_paps = [paper_id.strip() for paper_id in l.split(":")[1].replace("[","").replace("]","").replace("\n","").replace("\'","").replace("\"","").split(",")]
papers_per_author[l.split(":")[0]] = auth_paps
# Load train set
df_train = shuffle(pd.read_csv('/content/drive/MyDrive/altegrad_datachallenge/train.csv', dtype={'authorID': np.int64, 'h_index': np.float32})).reset_index(drop=True)
# Load test set
df_test = pd.read_csv('/content/drive/MyDrive/altegrad_datachallenge/test.csv', dtype={'authorID': np.int64})
# Load Graph
G = nx.read_edgelist('/content/drive/MyDrive/altegrad_datachallenge/collaboration_network.edgelist', delimiter=' ', nodetype=int)
# computes structural features for each node
core_number = nx.core_number(G)
avg_neighbor_degree = nx.average_neighbor_degree(G)
# Split into train/valid
df_valid = df_train.iloc[int(len(df_train)*0.9):, :]
df_train = df_train.iloc[:int(len(df_train)*0.9), :]
```
## Train
```
train_data = {}
for i, row in tqdm(df_train.iterrows()):
author_id, y = str(int(row['authorID'])), row['h_index']
degree, core_number_, avg_neighbor_degree_ = G.degree(int(author_id)), core_number[int(author_id)], avg_neighbor_degree[int(author_id)]
author_embedding = torch.from_numpy(embedding_per_nodes[int(author_id)].reshape(1,-1))
papers_ids = papers_per_author[author_id]
papers_embedding = []
num_papers = 0
for id_paper in papers_ids:
num_papers += 1
try:
papers_embedding.append(torch.from_numpy(embedding_per_paper[id_paper].reshape(1,-1)))
except KeyError:
print(f"Missing paper for {author_id}")
papers_embedding.append(torch.zeros((1,768)))
papers_embedding = torch.cat(papers_embedding, dim=0)
additional_features = torch.from_numpy(np.array([degree, core_number_, avg_neighbor_degree_, num_papers]).reshape(1,-1))
y = torch.Tensor([y])
train_data[author_id] = {'author_embedding': author_embedding, 'papers_embedding': papers_embedding, 'features': additional_features, 'target': y}
# Saving
save(train_data, '/content/drive/MyDrive/altegrad_datachallenge/data/data.train')
# Deleting (memory)
del train_data
```
## Validation
```
valid_data = {}
for i, row in tqdm(df_valid.iterrows()):
author_id, y = str(int(row['authorID'])), row['h_index']
degree, core_number_, avg_neighbor_degree_ = G.degree(int(author_id)), core_number[int(author_id)], avg_neighbor_degree[int(author_id)]
author_embedding = torch.from_numpy(embedding_per_nodes[int(author_id)].reshape(1,-1))
papers_ids = papers_per_author[author_id]
papers_embedding = []
num_papers = 0
for id_paper in papers_ids:
num_papers += 1
try:
papers_embedding.append(torch.from_numpy(embedding_per_paper[id_paper].reshape(1,-1)))
except KeyError:
papers_embedding.append(torch.zeros((1,768)))
papers_embedding = torch.cat(papers_embedding, dim=0)
additional_features = torch.from_numpy(np.array([degree, core_number_, avg_neighbor_degree_, num_papers]).reshape(1,-1))
y = torch.Tensor([y])
valid_data[author_id] = {'author_embedding': author_embedding, 'papers_embedding': papers_embedding, 'features': additional_features, 'target': y}
save(valid_data, '/content/drive/MyDrive/altegrad_datachallenge/data/data.valid')
del valid_data
```
## Test
```
test_data = {}
for i, row in tqdm(df_test.iterrows()):
author_id = str(int(row['authorID']))
degree, core_number_, avg_neighbor_degree_ = G.degree(int(author_id)), core_number[int(author_id)], avg_neighbor_degree[int(author_id)]
author_embedding = torch.from_numpy(embedding_per_nodes[int(author_id)].reshape(1,-1))
papers_ids = papers_per_author[author_id]
papers_embedding = []
num_papers = 0
for id_paper in papers_ids:
num_papers += 1
try:
papers_embedding.append(torch.from_numpy(embedding_per_paper[id_paper].reshape(1,-1)))
except KeyError:
papers_embedding.append(torch.zeros((1,768)))
papers_embedding = torch.cat(papers_embedding, dim=0)
additional_features = torch.from_numpy(np.array([degree, core_number_, avg_neighbor_degree_, num_papers]).reshape(1,-1))
test_data[author_id] = {'author_embedding': author_embedding, 'papers_embedding': papers_embedding, 'features': additional_features}
del G
del df_test
del embedding_per_paper
del papers_per_author
del core_number
del avg_neighbor_degree
del embedding_per_nodes
save(test_data, '/content/drive/MyDrive/altegrad_datachallenge/data/data.test', 4)
del test_data
```
# Doc2Vec
```
# Load the paper's embedding
embedding_per_paper = load_dataset_file('/content/drive/MyDrive/altegrad_datachallenge/files_generated/doc2vec_paper_embedding.txt')
# Load the node's embedding
embedding_per_nodes = load_dataset_file('/content/drive/MyDrive/altegrad_datachallenge/files_generated/Node2Vec.txt')
# read the file to create a dictionary with author key and paper list as value
f = open("/content/drive/MyDrive/altegrad_datachallenge/data/author_papers.txt","r")
papers_per_author = {}
for l in f:
auth_paps = [paper_id.strip() for paper_id in l.split(":")[1].replace("[","").replace("]","").replace("\n","").replace("\'","").replace("\"","").split(",")]
papers_per_author[l.split(":")[0]] = auth_paps
# Load train set
df_train = shuffle(pd.read_csv('/content/drive/MyDrive/altegrad_datachallenge/data/train.csv', dtype={'authorID': np.int64, 'h_index': np.float32})).reset_index(drop=True)
# Load test set
df_test = pd.read_csv('/content/drive/MyDrive/altegrad_datachallenge/data/test.csv', dtype={'authorID': np.int64})
# Load Graph
G = nx.read_edgelist('/content/drive/MyDrive/altegrad_datachallenge/data/collaboration_network.edgelist', delimiter=' ', nodetype=int)
# computes structural features for each node
core_number = nx.core_number(G)
avg_neighbor_degree = nx.average_neighbor_degree(G)
# Split into train/valid
df_valid = df_train.iloc[int(len(df_train)*0.9):, :]
df_train = df_train.iloc[:int(len(df_train)*0.9), :]
```
## Train
```
train_data = {}
for i, row in tqdm(df_train.iterrows()):
author_id, y = str(int(row['authorID'])), row['h_index']
degree, core_number_, avg_neighbor_degree_ = G.degree(int(author_id)), core_number[int(author_id)], avg_neighbor_degree[int(author_id)]
author_embedding = torch.from_numpy(embedding_per_nodes[int(author_id)].reshape(1,-1))
papers_ids = papers_per_author[author_id]
papers_embedding = []
num_papers = 0
for id_paper in papers_ids:
num_papers += 1
try:
papers_embedding.append(torch.from_numpy(embedding_per_paper[id_paper].reshape(1,-1)))
except KeyError:
print(f"Missing paper for {author_id}")
papers_embedding.append(torch.zeros((1,256)))
papers_embedding = torch.cat(papers_embedding, dim=0)
additional_features = torch.from_numpy(np.array([degree, core_number_, avg_neighbor_degree_, num_papers]).reshape(1,-1))
y = torch.Tensor([y])
train_data[author_id] = {'author_embedding': author_embedding, 'papers_embedding': papers_embedding, 'features': additional_features, 'target': y}
# Saving
save(train_data, '/content/drive/MyDrive/altegrad_datachallenge/data/d2v.train')
# Deleting (memory)
del train_data
```
## Dev
```
valid_data = {}
for i, row in tqdm(df_valid.iterrows()):
author_id, y = str(int(row['authorID'])), row['h_index']
degree, core_number_, avg_neighbor_degree_ = G.degree(int(author_id)), core_number[int(author_id)], avg_neighbor_degree[int(author_id)]
author_embedding = torch.from_numpy(embedding_per_nodes[int(author_id)].reshape(1,-1))
papers_ids = papers_per_author[author_id]
papers_embedding = []
num_papers = 0
for id_paper in papers_ids:
num_papers += 1
try:
papers_embedding.append(torch.from_numpy(embedding_per_paper[id_paper].reshape(1,-1)))
except KeyError:
papers_embedding.append(torch.zeros((1,256)))
papers_embedding = torch.cat(papers_embedding, dim=0)
additional_features = torch.from_numpy(np.array([degree, core_number_, avg_neighbor_degree_, num_papers]).reshape(1,-1))
y = torch.Tensor([y])
valid_data[author_id] = {'author_embedding': author_embedding, 'papers_embedding': papers_embedding, 'features': additional_features, 'target': y}
save(valid_data, '/content/drive/MyDrive/altegrad_datachallenge/data/d2v.valid')
del valid_data
```
## Test
```
test_data = {}
for i, row in tqdm(df_test.iterrows()):
author_id = str(int(row['authorID']))
degree, core_number_, avg_neighbor_degree_ = G.degree(int(author_id)), core_number[int(author_id)], avg_neighbor_degree[int(author_id)]
author_embedding = torch.from_numpy(embedding_per_nodes[int(author_id)].reshape(1,-1))
papers_ids = papers_per_author[author_id]
papers_embedding = []
num_papers = 0
for id_paper in papers_ids:
num_papers += 1
try:
papers_embedding.append(torch.from_numpy(embedding_per_paper[id_paper].reshape(1,-1)))
except KeyError:
papers_embedding.append(torch.zeros((1,256)))
papers_embedding = torch.cat(papers_embedding, dim=0)
additional_features = torch.from_numpy(np.array([degree, core_number_, avg_neighbor_degree_, num_papers]).reshape(1,-1))
test_data[author_id] = {'author_embedding': author_embedding, 'papers_embedding': papers_embedding, 'features': additional_features}
del G
del df_test
del embedding_per_paper
del papers_per_author
del core_number
del avg_neighbor_degree
del embedding_per_nodes
save(test_data, '/content/drive/MyDrive/altegrad_datachallenge/data/d2v.test', 4)
del test_data
```
----
# WeatherPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
!pip3 install citipy
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
```
## Generate Cities List
```
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities) #617
```
### Perform API Calls
* Perform a weather check on each city using a series of successive API calls.
* Include a print log of each city as it's being processed (with the city number and city name).
```
# Save config information
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&"
base_url = f"{url}APPID={weather_api_key}&q="
city_data = []
print("Beginning Data Retrieval")
print("--------------------------")
# iterate through the list of cities
for index, city in enumerate(cities):
print(f"Processing record {index}: {city}")
try:
# assemble url and make API request
response = requests.get(base_url + city).json()
# if index == 1:
# print(base_url + city)
city_lat = response['coord']["lat"]
city_lon = response['coord']["lon"]
max_temp = response['main']['temp_max']
humidity = response['main']['humidity']
cloudiness = response['clouds']['all']
wind_speed = response['wind']['speed']
country = response['sys']['country']
date = response['dt']
#Store data for each city found
city_data.append({"City": city,
"Lat": city_lat,
"Lon": city_lon,
"Max Temp": max_temp,
"Humidity": humidity,
"Cloudiness": cloudiness,
"Wind Speed": wind_speed,
"Country": country,
"Date": date})
except (KeyError, IndexError):
print("City Not found.Skipping...")
print("--------------------------")
print("Data Retrieval Complete")
print("--------------------------")
```
### Convert Raw Data to DataFrame
* Export the city data into a .csv.
* Display the DataFrame
```
# Make city data into DF and export the cities DataFrame to a CSV file
city_df = pd.DataFrame(city_data)
city_df.describe()
city_df.to_csv(output_data_file, index_label="City ID")
city_df.head()
city_df.count()
city_df.describe()
```
## Inspect the data and remove the cities where the humidity > 100%.
----
Skip this step if there are no cities that have humidity > 100%.
```
city_df.loc[city_df["Humidity"] > 100] # No rows returned
# Get the indices of cities that have humidity over 100%.
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
```
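No city exceeded 100% humidity in this run, so the drop described above is a no-op, but a minimal sketch of it (building the `clean_city_data` copy the comments mention) could look like this:
```
# Indices of cities with humidity over 100% (empty here)
humid_outliers = city_df[city_df["Humidity"] > 100].index
# Drop them into a new DataFrame, leaving city_df untouched
clean_city_data = city_df.drop(index=humid_outliers, inplace=False)
clean_city_data.count()
```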
## Plotting the Data
* Use proper labeling of the plots using plot titles (including the date of analysis; see the sketch after this list) and axes labels.
* Save the plotted figures as .pngs.
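For example, the analysis date can be added to a title with the standard `datetime` module (a minimal sketch; the plots below keep their original titles):
```
# Example: include the analysis date in a plot title
from datetime import date
plt.title(f"Latitude v Temperature Plot ({date.today():%Y-%m-%d})")
```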
## Latitude vs. Temperature Plot
```
#Make plot
temp = city_df["Max Temp"]
lat = city_df["Lat"]
plt.scatter(lat, temp, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min temp
plt.ylim(min(temp) - 5, max(temp) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Latitude v Temperature Plot")
plt.xlabel("Latitude")
plt.ylabel("Temperature (Fahrenheit)")
plt.savefig("Latitude_Temperature.png")
# Prints the scatter plot to the screen
plt.show()
#The plot shows that the further away a location is from the equator, the lower the max temperature
#The more extreme cold temperatures (< 0 degrees F) are all in the Northern hemisphere.
```
## Latitude vs. Humidity Plot
```
#Make plot
humid = city_df["Humidity"]
lat = city_df["Lat"]
plt.scatter(lat, humid, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min humidity
plt.ylim(min(humid) - 5, max(humid) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Latitude v Humidity Plot")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.savefig("Latitude_Humidity.png")
# Prints the scatter plot to the screen
plt.show()
#There isn't too much of a trend for this scatter plot
#The cities with a higher percent of humidity are near the equator and around a latitude of 50
```
## Latitude vs. Cloudiness Plot
```
#Make plot
cloud = city_df["Cloudiness"]
lat = city_df["Lat"]
plt.scatter(lat, cloud, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min cloudiness
plt.ylim(min(cloud) - 5, max(cloud) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Latitude v Cloudiness Plot")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.savefig("Latitude_Cloudiness.png")
# Prints the scatter plot to the screen
plt.show()
#Once again not too much of a general trend
#But there does seem to be either high cloudiness or no cloudiness for most of the data (some in between)
```
## Latitude vs. Wind Speed Plot
```
#Make plot
wind = city_df["Wind Speed"]
lat = city_df["Lat"]
plt.scatter(lat, wind, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on the max wind speed
plt.ylim(-0.75, max(wind) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Latitude v Wind Speed Plot")
plt.xlabel("Latitude")
plt.ylabel("Windiness (mph)")
plt.savefig("Latitude_WindSpeed.png")
# Prints the scatter plot to the screen
plt.show()
#Most of the data has wind speeds between 0 and 15 mph
#Latitude does not seem to have an effect on wind speed
```
## Linear Regression
```
#Separate our data frame into different hemispheres
city_df.head()
nor_hem_df = city_df.loc[city_df["Lat"] >= 0]
so_hem_df = city_df.loc[city_df["Lat"] <= 0]
```
#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
```
#Make plot
temp = nor_hem_df["Max Temp"]
lat = nor_hem_df["Lat"]
plt.scatter(lat, temp, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min temp
plt.ylim(min(temp) - 5, max(temp) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Northern Hemisphere - Latitude v Max Temperature Plot")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature (Fahrenheit)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, temp)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.savefig("NH_Latitude_Tem.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Northern Hemisphere and their max temperature
#The further a city is from the equator (x > 0), the lower the max temp
```
#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
```
#Make plot
temp = so_hem_df["Max Temp"]
lat = so_hem_df["Lat"]
plt.scatter(lat, temp, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min temp
plt.ylim(min(temp) - 5, max(temp) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 2, max(lat) + 2)
# Create a title, x label, and y label for our chart
plt.title("Southern Hemisphere - Latitude v Max Temperature Plot")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature (Fahrenheit)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, temp)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(0, 80),fontsize=15,color="red")
plt.savefig("SH_Latitude_Temp.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Southern Hemisphere and their max temperature
#The further a city is from the equator (x < 0), the lower the max temp
```
#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
```
#Make plot
humid = nor_hem_df["Humidity"]
lat = nor_hem_df["Lat"]
plt.scatter(lat, humid, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min humidity
plt.ylim(min(humid) - 5, max(humid) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Northern Hemisphere - Latitude v Humidity Plot")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, humid)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(48, 40),fontsize=15,color="red")
plt.savefig("NH_Latitude_Humidity.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Northern Hemisphere and their humidity percentage
#The further a city is from the equator (x > 0), the higher the humidity percentage
```
#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
```
#Make plot
humid = so_hem_df["Humidity"]
lat = so_hem_df["Lat"]
plt.scatter(lat, humid, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min humidity
plt.ylim(min(humid) - 5, max(humid) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Southern Hemisphere - Latitude v Humidity Plot")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, humid)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(5,85),fontsize=15,color="red")
plt.savefig("SH_Latitude_Humidity.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Southern Hemisphere and their humidity percentage
#The further a city is from the equator (x < 0), the lower the humidity percentage
```
#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
```
#Make plot
cloud = nor_hem_df["Cloudiness"]
lat = nor_hem_df["Lat"]
plt.scatter(lat, cloud, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min cloudiness
plt.ylim(min(cloud) - 5, max(cloud) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Northern Hemisphere - Latitude v Cloudiness Plot")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, cloud)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(60,50),fontsize=15,color="red")
plt.savefig("NH_Latitude_Cloudiness.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Northern Hemisphere and their cloudiness percentage
#In general, the further a city is from the equator (x > 0), the higher the cloudiness percentage
```
#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
```
#Make plot
cloud = so_hem_df["Cloudiness"]
lat = so_hem_df["Lat"]
plt.scatter(lat, cloud, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min cloudiness
plt.ylim(min(cloud) - 5, max(cloud) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Southern Hemisphere - Latitude v Cloudiness Plot")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, cloud)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(0,60),fontsize=15,color="red")
plt.savefig("SH_Latitude_Cloudiness.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Southern Hemisphere and their cloudiness percentage
#In general, the further a city is from the equator (x < 0), the lower the cloudiness percentage
```
#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
```
#Make plot
wind = nor_hem_df["Wind Speed"]
lat = nor_hem_df["Lat"]
plt.scatter(lat, wind, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min wind speed
plt.ylim(min(wind) - 5, max(wind) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Northern Hemisphere - Latitude v Windiness Plot")
plt.xlabel("Latitude")
plt.ylabel("Windiness (mph)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, wind)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(6,30),fontsize=15,color="red")
plt.savefig("NH_Latitude_Windiness.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Northern Hemisphere and their wind speed
#The slope in this case is very small. There is not a significant change in wind speed the further a city is from the equator
```
#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
```
#Make plot
wind = so_hem_df["Wind Speed"]
lat = so_hem_df["Lat"]
plt.scatter(lat, wind, marker="o", facecolors="blue", edgecolors="black", alpha=0.75)
# Set y lim based on max and min wind speed
plt.ylim(min(wind) - 5, max(wind) + 5)
# Set the x lim based on max and min lat
plt.xlim(min(lat) - 5, max(lat) + 5)
# Create a title, x label, and y label for our chart
plt.title("Southern Hemisphere - Latitude v Windiness Plot")
plt.xlabel("Latitude")
plt.ylabel("Windiness (mph)")
# Add linear regression Line
(slope, intercept, rvalue, pvalue, stderr) = linregress(lat, wind)
regress_values = lat * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(lat,regress_values,"r-")
plt.annotate(line_eq,(-50,23),fontsize=15,color="red")
plt.savefig("SH_Latitude_Windiness.png")
# Prints the scatter plot to the screen
plt.show()
#This plot is showing the relationship between latitude of cities in the Southern Hemisphere and their wind speed
#The slope in this case is also small but there is a slight change in wind speed the further a city is from the equator
```
----
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Unit harmonic oscillator with A = k = m = 1:
#   x = A cos(sqrt(k/m) t + theta) = cos(t + theta)
#   p = m x' = -A sqrt(k m) sin(sqrt(k/m) t + theta) = -sin(t + theta)
t = np.linspace(0, 2 * np.pi, 100)
t
```
# Exact Equation
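The system integrated in this notebook is the unit harmonic oscillator; writing out its Hamiltonian makes the exact phase-space trajectory explicit (the phase $\theta$ is fixed by the initial condition; the cell below uses $\theta = -\pi$):

$$H(x, p) = \tfrac{1}{2}x^2 + \tfrac{1}{2}p^2, \qquad \dot{x} = \frac{\partial H}{\partial p} = p, \qquad \dot{p} = -\frac{\partial H}{\partial x} = -x$$

$$x(t) = \cos(t + \theta), \qquad p(t) = -\sin(t + \theta)$$

so the exact trajectory in the $(x, p)$ plane is the unit circle that each numerical integrator below approximates.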
```
x, p = np.cos(t - np.pi), -np.sin(t - np.pi)
fig = plt.figure(figsize=(5, 5))
for i in range(0, len(t), 1):
plt.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
```
# Euler's Method
```
# Hamilton's equations for the unit oscillator (k = m = 1):
#   x' = p/m = p
#   p' = -kx = -x
# Euler update (both derivatives evaluated at the current point):
#   x_next = x + eps * x' = x + eps*(p)
#   p_next = p + eps * p' = p - eps*(x)
fig = plt.figure(figsize=(5, 5))
plt.title("Euler's Method (eps=0.1)")
plt.xlabel("position (q)")
plt.ylabel("momentum (p)")
for i in range(0, len(t), 1):
plt.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.1
steps = 100
for i in range(0, steps, 1):
x_next = x_prev + eps * p_prev
p_next = p_prev - eps * x_prev
plt.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
```
# Modified Euler's Method
```
#   x' = p/m = p
#   p' = -kx = -x
# Modified Euler update (p is stepped first, then x uses the new p):
#   p_next = p + eps * p' = p - eps*(x)
#   x_next = x + eps * (p_next)
fig = plt.figure(figsize=(5, 5))
plt.title("Modified Euler's Method (eps=0.2)")
plt.xlabel("position (q)")
plt.ylabel("momentum (p)")
for i in range(0, len(t), 1):
plt.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.2
steps = int(2*np.pi / eps)
for i in range(0, steps, 1):
p_next = p_prev - eps * x_prev
x_next = x_prev + eps * p_next
plt.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
#   x' = p/m = p
#   p' = -kx = -x
# Modified Euler update (p is stepped first, then x uses the new p):
#   p_next = p + eps * p' = p - eps*(x)
#   x_next = x + eps * (p_next)
fig = plt.figure(figsize=(5, 5))
plt.title("Modified Euler's Method (eps=0.2)")
plt.xlabel("position (q)")
plt.ylabel("momentum (p)")
for i in range(0, len(t), 1):
plt.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0.1
p_prev = 1
eps = 1.31827847281
#eps = 1.31827847281
steps = 50 #int(2*np.pi / eps)
for i in range(0, steps, 1):
p_next = p_prev - eps * x_prev
x_next = x_prev + eps * p_next
plt.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
```
# Leapfrog Method
```
#   x' = p/m = p
#   p' = -kx = -x
# Leapfrog update (half step in p, full step in x, half step in p):
#   p_half = p - eps/2 * x
#   x_next = x + eps * p_half
#   p_next = p_half - eps/2 * x_next
fig = plt.figure(figsize=(5, 5))
plt.title("Leapfrog Method (eps=0.2)")
plt.xlabel("position (q)")
plt.ylabel("momentum (p)")
for i in range(0, len(t), 1):
plt.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.2
steps = int(2*np.pi / eps)
for i in range(0, steps, 1):
p_half = p_prev - eps/2 * x_prev
x_next = x_prev + eps * p_half
p_next = p_half - eps/2 * x_next
plt.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
#   x' = p/m = p
#   p' = -kx = -x
# Leapfrog update (half step in p, full step in x, half step in p):
#   p_half = p - eps/2 * x
#   x_next = x + eps * p_half
#   p_next = p_half - eps/2 * x_next
fig = plt.figure(figsize=(5, 5))
plt.title("Leapfrog Method (eps=0.9)")
plt.xlabel("position (q)")
plt.ylabel("momentum (p)")
for i in range(0, len(t), 1):
plt.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.9
steps = 3 * int(2*np.pi / eps + 0.1)
for i in range(0, steps, 1):
p_half = p_prev - eps/2 * x_prev
x_next = x_prev + eps * p_half
p_next = p_half - eps/2 * x_next
plt.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
```
# Combined Figure
```
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15,15))
# subplot1
ax1.set_title("Euler's Method (eps=0.1)")
ax1.set_xlabel("position (q)")
ax1.set_ylabel("momentum (p)")
for i in range(0, len(t), 1):
ax1.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.1
steps = 100
for i in range(0, steps, 1):
x_next = x_prev + eps * p_prev
p_next = p_prev - eps * x_prev
ax1.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
# subplot2
ax2.set_title("Modified Euler's Method (eps=0.2)")
ax2.set_xlabel("position (q)")
ax2.set_ylabel("momentum (p)")
for i in range(0, len(t), 1):
ax2.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.2
steps = int(2*np.pi / eps)
for i in range(0, steps, 1):
p_next = p_prev - eps * x_prev
x_next = x_prev + eps * p_next
ax2.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
# subplot3
ax3.set_title("Leapfrog Method (eps=0.2)")
ax3.set_xlabel("position (q)")
ax3.set_ylabel("momentum (p)")
for i in range(0, len(t), 1):
ax3.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.2
steps = int(2*np.pi / eps)
for i in range(0, steps, 1):
p_half = p_prev - eps/2 * x_prev
x_next = x_prev + eps * p_half
p_next = p_half - eps/2 * x_next
ax3.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
# subplot4
ax4.set_title("Leapfrog Method (eps=0.9)")
ax4.set_xlabel("position (q)")
ax4.set_ylabel("momentum (p)")
for i in range(0, len(t), 1):
ax4.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = 0
p_prev = 1
eps = 0.9
steps = 3 * int(2*np.pi / eps + 0.1)
for i in range(0, steps, 1):
p_half = p_prev - eps/2 * x_prev
x_next = x_prev + eps * p_half
p_next = p_half - eps/2 * x_next
ax4.plot([x_prev, x_next], [p_prev, p_next], marker='o', color='blue', markersize=5)
x_prev, p_prev = x_next, p_next
```
# Combined Figure - Square
```
fig, ((ax1, ax2)) = plt.subplots(1, 2, figsize=(15, 7.5))
# subplot1
ax1.set_title("Euler's Method (eps=0.2)")
ax1.set_xlabel("position (q)")
ax1.set_ylabel("momentum (p)")
for i in range(0, len(t), 1):
ax1.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
def draw_square(ax, x, p, **args):
assert len(x) == len(p) == 4
x = list(x) + [x[0]]
p = list(p) + [p[0]]
ax.plot(x, p, **args)
def euler_update(x, p, eps):
assert len(x) == len(p) == 4
x_next = [0.]* 4
p_next = [0.]* 4
for i in range(4):
x_next[i] = x[i] + eps * p[i]
p_next[i] = p[i] - eps * x[i]
return x_next, p_next
def mod_euler_update(x, p, eps):
assert len(x) == len(p) == 4
x_next = [0.]* 4
p_next = [0.]* 4
for i in range(4):
x_next[i] = x[i] + eps * p[i]
p_next[i] = p[i] - eps * x_next[i]
return x_next, p_next
delta = 0.1
eps = 0.2
x_prev = np.array([0.0, 0.0, delta, delta]) + 0.0
p_prev = np.array([0.0, delta, delta, 0.0]) + 1.0
steps = int(2*np.pi / eps)
for i in range(0, steps, 1):
draw_square(ax1, x_prev, p_prev, marker='o', color='blue', markersize=5)
x_next, p_next = euler_update(x_prev, p_prev, eps)
x_prev, p_prev = x_next, p_next
# subplot2
ax2.set_title("Modified Euler's Method (eps=0.2)")
ax2.set_xlabel("position (q)")
ax2.set_ylabel("momentum (p)")
for i in range(0, len(t), 1):
ax2.plot(x[i:i+2], p[i:i+2], color='black', markersize=0)
x_prev = np.array([0.0, 0.0, delta, delta]) + 0.0
p_prev = np.array([0.0, delta, delta, 0.0]) + 1.0
for i in range(0, steps, 1):
draw_square(ax2, x_prev, p_prev, marker='o', color='blue', markersize=5)
x_next, p_next = mod_euler_update(x_prev, p_prev, eps)
x_prev, p_prev = x_next, p_next
```
----
# Import and settings
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from snaptools import manipulate as man
from snaptools import snapio
from snaptools import plot_tools
from snaptools import utils
from scipy.stats import binned_statistic
from mpl_toolkits.axes_grid1 import Grid
from snaptools import simulation
from snaptools import snapshot
from snaptools import measure
from pathos.multiprocessing import ProcessingPool as Pool
from mpl_toolkits.axes_grid1 import make_axes_locatable
import h5py
import pandas as PD
from scipy.interpolate import interp2d
colors = ['#332288', '#CC6677', '#6699CC', '#117733']
import matplotlib
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('lines', linewidth=3)
%matplotlib inline
```
# Snapshot
```
settings = plot_tools.make_defaults(first_only=True, com=True, xlen=20, ylen=20, in_min=0)
snap = snapio.load_snap('/usr/users/spardy/coors2/hpc_backup/working/Gas/Dehnen_LMC/collision/output_Dehnen_smc_45deg/snap_007.hdf5')
velfield = snap.to_velfield(parttype='gas', write=False, first_only=True, com=True)
centDict = snap.find_centers(settings)
com1, com2, gal1id, gal2id = snap.center_of_mass('stars')
velx = snap.vel['stars'][gal1id, 0]
vely = snap.vel['stars'][gal1id, 1]
velz = snap.vel['stars'][gal1id, 2]
posx = snap.pos['stars'][gal1id, 0]
posy = snap.pos['stars'][gal1id, 1]
posz = snap.pos['stars'][gal1id, 2]
posx -= com1[0]
posy -= com1[1]
posz -= com1[2]
x_axis = np.linspace(-15, 15, 512)
y_axis = x_axis
X, Y = np.meshgrid(x_axis, y_axis)
angle = np.arctan2(X, Y)
R = np.sqrt(X**2 + Y**2)*(-1)**(angle < 0)
# Use arctan to make all R values negative on other side of Y axis
#sparse_vfield = snap.to_velfield(lengthX=10, lengthY=10, BINS=128, write=False, first_only=True, com=True)
settings = plot_tools.make_defaults(first_only=True, com=True, xlen=10, ylen=10, in_min=0, BINS=128)
Z2 = snap.to_cube(theta=45, write=False, first_only=True, com=True, BINS=128, lengthX=10, lengthY=10)
mom1 = np.zeros((128, 128))
velocities = np.linspace(-200, 200, 100)
for i in xrange(Z2.shape[2]):
mom1 += Z2[:,:,i]*velocities[i]
mom1 /= np.sum(Z2, axis=2)
sparse_vfield = mom1
sparse_vfield[sparse_vfield != sparse_vfield] = 0
sparse_X, sparse_Y = np.meshgrid(np.linspace(-10, 10, 128), np.linspace(-10, 10, 128))
with open('./vels_i45deg.txt', 'w') as velfile:
velfile.write(' X Y VEL EVEL\n')
velfile.write(' asec asec km/s km/s\n')
velfile.write('-----------------------------------------\n')
for xi, yi, vi in zip(sparse_X.flatten(), sparse_Y.flatten(), sparse_vfield.flatten()):
velfile.write('%3.2f %3.2f %3.2f 0.001\n' % (xi, yi, vi))
com1, com2, gal1id, gal2id = snap.center_of_mass('stars')
v1 = snap.vel['stars'][gal1id, :].mean(axis=0)
v2 = snap.vel['stars'][gal2id, :].mean(axis=0)
print(np.sqrt(np.sum((v1-v2)**2)))
```
# Measure Velocities from Velfield
```
# Now try with the velfield
settings = plot_tools.make_defaults(first_only=True, com=True, xlen=20, ylen=20, in_min=0)
binDict = snap.bin_snap(settings)
Z2 = binDict['Z2']
measurements = man.fit_contours(Z2, settings, plot=True)
#measurementsV2 = man.fit_contours(~np.isnan(velfield), settingsV, plot=True, numcontours=1)
length = 10
thick = 0.1
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
axes = axes.flatten()
if len(np.where(measurements['eccs'] > 0.5)[0]) > 0:
bar_ind = np.max(np.where(measurements['eccs'] > 0.5)[0])
theta = measurements['angles'][bar_ind]
else:
theta = measurements['angles'][measurements['angles'] == measurements['angles']][-1]
#print(theta)
r = 0
im = axes[1].imshow(velfield, origin='lower', extent=[-20, 20, -20, 20], cmap='gnuplot')
im = axes[3].imshow(velfield, origin='lower', extent=[-20, 20, -20, 20], cmap='gnuplot')
#fig.colorbar(im)
#axes[1].add_artist(measurementsV['ellipses'][0])
#axes[3].add_artist(measurementsV2['ellipses'][0])
for i, t in enumerate(np.radians([theta, theta+90])):
x = r*np.sin(t)
y = r*np.cos(t)
verts = [
[length*np.cos(t)-thick*np.sin(t)-x,
length*np.sin(t)+thick*np.cos(t)+y],
[length*np.cos(t)+thick*np.sin(t)-x,
length*np.sin(t)-thick*np.cos(t)+y],
[-length*np.cos(t)+thick*np.sin(t)-x,
-length*np.sin(t)-thick*np.cos(t)+y],
[-length*np.cos(t)-thick*np.sin(t)-x,
-length*np.sin(t)+thick*np.cos(t)+y],
[0, 0]]
path = Path(verts, codes)
within_box = path.contains_points(np.array([X.flatten(), Y.flatten()]).T)
s = R.flatten()[within_box].argsort()
dist = R.flatten()[within_box][s]
vel = velfield.flatten()[within_box][s]
vel, binEdges, binNum = binned_statistic(dist, vel, bins=50)
rcoord = binEdges[np.nanargmin(np.abs(vel))]
print(np.abs(vel))
print(rcoord)
axes[i*2].set_title(str(i))
divider = make_axes_locatable(axes[i*2])
axOff = divider.append_axes("bottom", size=1.5, pad=0.1)
axes[i*2].set_xticks([])
axOff.set_xlabel('R [kpc]')
axOff.set_ylabel('Velocity [km s$^{-1}$]')
axOff.axvline(x=rcoord)
axOff.plot(binEdges[:-1], vel, 'b.')
axes[i*2].plot(binEdges[:-2], np.diff(vel), 'b.')
#diffs = np.abs(np.diff(vel))
#if np.any(diffs == diffs):
#rcoord = binEdges[np.nanargmax(np.abs(np.diff(vel)))]
xcoord = np.cos(t)*rcoord
ycoord = np.sin(t)*rcoord
patch = patches.PathPatch(path, facecolor='none', lw=2, alpha=0.5)
axes[1+2*i].add_patch(patch)
#axes[1+2*i].text((1+length)*np.cos(t)-thick*np.sin(t)-x,
# (1+length)*np.sin(t)+thick*np.cos(t)+y,
# str(i), fontsize=15, color='black')
axes[1+2*i].plot(xcoord, ycoord, 'k+', markersize=15, markeredgewidth=2)
axes[1+2*i].plot(centDict['barCenter'][0], centDict['barCenter'][1], 'g^', markersize=15, markeredgewidth=1, markerfacecolor=None)
axes[1+2*i].plot(centDict['haloCenter'][0], centDict['haloCenter'][1], 'bx', markersize=15, markeredgewidth=2)
axes[1+2*i].plot(centDict['diskCenters'][0], centDict['diskCenters'][1], 'c*', markersize=15, markeredgewidth=2)
centDict
#plt.tight_layout()
plt.show()
print(np.nanargmin(np.abs(vel)))
print(binEdges[np.nanargmin(np.abs(vel))])
#fig, axes = plt.subplots(1, 2, figsize=(20, 10))
fig, axis = plt.subplots(1, figsize=(10,10))
#plot_tools.plot_contours(density, measurements, 0, -1, [0, 0], settings, axis=axis)
im = axis.imshow(velfield, origin='lower', extent=[-15, 15, -15, 15], cmap='gnuplot')
#axes[1].imshow(mom1, origin='lower', extent=[-15, 15, -15, 15], cmap='gnuplot')
fig.colorbar(im)
length = 10
thick = 0.1
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
theta = 110
for i, r in enumerate(xrange(-5, 5, 1)):
x = r*np.sin(np.radians(theta))
y = r*np.cos(np.radians(theta))
verts = [
[length*np.cos(np.radians(theta))-thick*np.sin(np.radians(theta))-x,
length*np.sin(np.radians(theta))+thick*np.cos(np.radians(theta))+y],
[length*np.cos(np.radians(theta))+thick*np.sin(np.radians(theta))-x,
length*np.sin(np.radians(theta))-thick*np.cos(np.radians(theta))+y],
[-length*np.cos(np.radians(theta))+thick*np.sin(np.radians(theta))-x,
-length*np.sin(np.radians(theta))-thick*np.cos(np.radians(theta))+y],
[-length*np.cos(np.radians(theta))-thick*np.sin(np.radians(theta))-x,
-length*np.sin(np.radians(theta))+thick*np.cos(np.radians(theta))+y],
[0, 0]]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=2, alpha=0.75)
axis.add_patch(patch)
axis.text((1+length)*np.cos(np.radians(theta))-thick*np.sin(np.radians(theta))-x,
(1+length)*np.sin(np.radians(theta))+thick*np.cos(np.radians(theta))+y,
str(i), fontsize=15, color='black')
#axes[0].set_xlim(-15,15)
#axes[0].set_ylim(-15,15)
# Now try with the velfield
length = 10
thick = 0.1
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
fig, axes = plt.subplots(2, 5, figsize=(20, 6))
axes = axes.flatten()
theta = np.radians(110)
for i, r in enumerate(xrange(-5, 5, 1)):
x = r*np.sin(theta)
y = r*np.cos(theta)
verts = [
[length*np.cos(theta)-thick*np.sin(theta)-x,
length*np.sin(theta)+thick*np.cos(theta)+y],
[length*np.cos(theta)+thick*np.sin(theta)-x,
length*np.sin(theta)-thick*np.cos(theta)+y],
[-length*np.cos(theta)+thick*np.sin(theta)-x,
-length*np.sin(theta)-thick*np.cos(theta)+y],
[-length*np.cos(theta)-thick*np.sin(theta)-x,
-length*np.sin(theta)+thick*np.cos(theta)+y],
[0, 0]]
path = Path(verts, codes)
within_box = path.contains_points(np.array([X.flatten(), Y.flatten()]).T)
s = Y.flatten()[within_box].argsort()
dist = Y.flatten()[within_box][s]
vel = velfield.flatten()[within_box][s]
vel, binEdges, binNum = binned_statistic(dist, vel, bins=50)
axes[i].set_title(str(i))
divider = make_axes_locatable(axes[i])
axOff = divider.append_axes("bottom", size=1, pad=0.1)
axes[i].set_xticks([])
axOff.plot(binEdges[:-1], vel, 'b.')
axes[i].plot(binEdges[:-2], np.diff(vel), 'b.')
xcoord = binEdges[np.nanargmax(np.abs(np.diff(vel)))]
ycoord = np.tan(theta)*xcoord
print(xcoord, ycoord)
#plt.tight_layout()
plt.show()
!ls ../
```
## Plot 2D velocities vs. disk fit
```
names = [r'$\theta = 45$', r'$\theta = 90$',
r'$\theta = 0$', r'$\theta = 0$ - Retrograde']
fig = plt.figure(figsize=(15, 10))
colors = ['#332288', '#CC6677', '#6699CC', '#117733']
grid = Grid(fig, 111,
nrows_ncols=(2, 2),
axes_pad=0.0,
label_mode="L",
share_all=True
)
groups = ['45deg',
'90deg',
'0deg',
'0deg_retro']
for group, ax in zip(groups, grid):
with h5py.File('../Data/offSetsDehnen_best.hdf5', 'r') as offsets:
centers = offsets['/stars/%s/' % group]
haloCenters = centers['halo_pos'][()]
diskCenters = centers['disk_pos'][()]
times = centers['time'][()]
velcents2d = np.loadtxt('/usr/users/spardy/coors/data/2dVels/xy_%s.txt' % group)
velcents2d = np.array(velcents2d).reshape(len(velcents2d)/2, 2, order='F')
ax.plot(times[:-1],
np.sqrt(np.sum((diskCenters[:-1, :]-haloCenters[:-1, :])**2, axis=1)),
label='Photometric')
ax.plot(times[:-1],
np.sqrt(np.sum((velcents2d-haloCenters[:-1, :])**2, axis=1)),
label='2D Velocity', color=colors[1])
for i, (ax, name) in enumerate(zip(grid, names)):
if i == 0:
yticks = ax.yaxis.get_major_ticks()
yticks[0].label1.set_visible(False)
ax.set_xlim(0, 1.9)
#ax.set_ylim(0, 4.0)
#ax.errorbar([-0.75], [1.1], yerr=distErrs, label='Typical Error')
ax.legend(fancybox=True, loc='upper right')
if (i == 0) or (i == 2):
ax.set_ylabel('Offset from Halo \nCenter [kpc]', fontsize=20)
#axOff.set_ylabel('D$_{Disk}$ - D$_{Bar}$ \n [kpc]', fontsize=20)
ax.set_xlabel("Time [Gyr]", fontsize=20)
ax.annotate(name, xy=(0.05, 0.8), color='black', xycoords='axes fraction',
bbox=dict(facecolor='gray', edgecolor='black',
boxstyle='round, pad=1', alpha=0.5))
plt.subplots_adjust(wspace=0.04) # Default is 0.2
plt.savefig('../../Offsets_paper/plots/velocity_centers.pdf', dpi=600)
fig, axes = plt.subplots(1, 3, figsize=(22.5, 7.5))
with h5py.File('/usr/users/spardy/velocity_offsets.hdf5', 'r') as velFile:
grp = velFile['Dehnen_45deg/']
velcents2d = np.loadtxt('/usr/users/spardy/coors/data/2dVels/xy.txt')
velcents2d = np.array(velcents2d).reshape(len(velcents2d)/2, 2, order='F')
# Minor Axis
velCenters = np.sqrt(np.sum(grp['velCenters'][()]**2, axis=1))
velCent = velCenters[:, 1]
axes[0].plot(times, velCent, zorder=-1, label='Minor-Axis', color=colors[1], linestyle='--')
velCent = PD.rolling_mean(velCenters[:, 1], 3)
times = grp['time'][()]
axes[0].plot(times, np.sqrt(np.sum(grp['diskCenters'][()]**2, axis=1)), label='Disk')
axes[0].plot(times, velCent, zorder=-1, label='Avg.', color='gray')
# major axis
velCent = velCenters[:, 0]
axes[1].plot(times, velCent, zorder=-1, label='Major-Axis', color=colors[1], linestyle='--')
velCent = PD.rolling_mean(velCenters[:, 1], 3)
axes[1].plot(times, np.sqrt(np.sum(grp['diskCenters'][()]**2, axis=1)), label='Disk')
axes[1].plot(times, velCent, zorder=-1, label='Avg.', color='gray')
# 2d fit
axes[2].plot(times, np.sqrt(np.sum(grp['diskCenters'][()]**2, axis=1)), label='Disk')
axes[2].plot(times, np.sqrt(np.sum(velcents2d**2, axis=1)), label='2D Velocity', color=colors[1])
for axis in axes:
axis.legend()
axis.set_xlabel('Time [Gyr]')
axis.set_ylabel('Distance from Frame Center [kpc]')
data = np.loadtxt("/usr/users/spardy/coors/data/2dVels/vel008_0.txt", skiprows=3, usecols=(0,1,2))
model = np.loadtxt("/usr/users/spardy/coors/data/2dVels/LMC_OUT_0/vel008_0.mod", skiprows=2, usecols=(0,1,2))
#dataX = data[:, 0].reshape(256, 256)
#dataY = data[:, 1].reshape(256, 256)
dataZ = data[:, 2].reshape(256, 256)
print(dataZ.shape)
#dataF = interp2d(data[:, 0], data[:, 1], data[:, 2])
print(model.shape)
binsize = 20./256.
Xind = np.array(np.floor((model[:, 0]+10)/binsize)).astype(int)
Yind = np.array(np.floor((model[:, 1]+10)/binsize)).astype(int)
#modelX = model[:, 0].reshape(sz, sz)
#modelY = model[:, 1].reshape(sz, sz)
#modelZ = model[:, 2].reshape(sz, sz)
#XIND, YIND = np.meshgrid(Xind, Yind)
sparseImg = np.ones((256, 256))*np.nan
#sparseImg[XIND, YIND] = dataZ[XIND, YIND]
sparseModel = np.ones((256, 256))*np.nan
for xi, yi, z in zip(Xind, Yind, model[:, 2]):
sparseModel[xi, yi] = z
sparseImg[Xind, Yind] = dataZ[Xind, Yind]
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
axes[0].imshow(sparseImg, extent=[-10, 10, -10, 10])
axes[1].imshow(sparseModel.T, extent=[-10, 10, -10, 10])
axes[2].imshow(sparseModel.T-sparseImg, extent=[-10, 10, -10, 10])
#axes[1].plot(model[:, 0], model[:, 2])
```
# OLD STUFF
```
theta = np.radians(20)
r = 0
x = r*np.sin(theta)
y = r*np.cos(theta)
verts = [
[length*np.cos(theta)-thick*np.sin(theta)-x,
length*np.sin(theta)+thick*np.cos(theta)+y],
[length*np.cos(theta)+thick*np.sin(theta)-x,
length*np.sin(theta)-thick*np.cos(theta)+y],
[-length*np.cos(theta)+thick*np.sin(theta)-x,
-length*np.sin(theta)-thick*np.cos(theta)+y],
[-length*np.cos(theta)-thick*np.sin(theta)-x,
-length*np.sin(theta)+thick*np.cos(theta)+y],
[0, 0]]
path = Path(verts, codes)
within_box = path.contains_points(np.array([X.flatten(), Y.flatten()]).T)
s = Y.flatten()[within_box].argsort()
dist = Y.flatten()[within_box][s]
vel = velfield.flatten()[within_box][s]
vel, binEdges, binNum = binned_statistic(dist, vel, bins=50)
xcoord = binEdges[np.nanargmax(np.abs(np.diff(vel)))]
ycoord = np.tan(theta)*xcoord
print(xcoord, ycoord)
# MOM1 maps
settings = plot_tools.make_defaults(first_only=True, com=True, xlen=20, ylen=20, in_min=0)
Z2 = snap.to_cube(theta=20, write=False, first_only=True, com=True)
mom1 = np.zeros((512, 512))
velocities = np.linspace(-200, 200, 100)
for i in xrange(Z2.shape[2]):
mom1 += Z2[:,:,i]*velocities[i]
mom1 /= np.sum(Z2, axis=2)
x_axis = np.linspace(-15, 15, 512)
y_axis = x_axis
X, Y = np.meshgrid(x_axis, y_axis)
density = np.sum(Z2, axis=2)
density[density > 0] = np.log10(density[density > 0])
settings = plot_tools.make_defaults(xlen=20, ylen=20, in_min=0, in_max=6)
measurements = man.fit_contours(density, settings, plot=True)
#Using the moment1 map
length = 10
thick = 0.1
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
fig, axes = plt.subplots(2, 5, figsize=(20, 6))
axes = axes.flatten()
theta = np.radians(measurements['angles'][0]-90)
for i, r in enumerate(xrange(-5, 5, 1)):
x = r*np.sin(theta)
y = r*np.cos(theta)
verts = [
[length*np.cos(theta)-thick*np.sin(theta)-x,
length*np.sin(theta)+thick*np.cos(theta)+y],
[length*np.cos(theta)+thick*np.sin(theta)-x,
length*np.sin(theta)-thick*np.cos(theta)+y],
[-length*np.cos(theta)+thick*np.sin(theta)-x,
-length*np.sin(theta)-thick*np.cos(theta)+y],
[-length*np.cos(theta)-thick*np.sin(theta)-x,
-length*np.sin(theta)+thick*np.cos(theta)+y],
[0, 0]]
path = Path(verts, codes)
within_box = path.contains_points(np.array([X.flatten(), Y.flatten()]).T)
s = X.flatten()[within_box].argsort()
dist = X.flatten()[within_box][s]
vel = mom1.flatten()[within_box][s]
vel, binEdges, binNum = binned_statistic(dist, vel, bins=50)
axes[i].set_title(str(i))
divider = make_axes_locatable(axes[i])
axOff = divider.append_axes("bottom", size=1, pad=0.1)
axes[i].set_xticks([])
axOff.plot(binEdges[:-1], vel, 'b.')
axes[i].plot(binEdges[:-2], np.diff(vel), 'b.')
#plt.tight_layout()
plt.show()
for i, theta in enumerate(xrange(0, 180, 18)):
verts = [
[length*np.cos(np.radians(theta))-thick*np.sin(np.radians(theta)),
length*np.sin(np.radians(theta))+thick*np.cos(np.radians(theta))],
[length*np.cos(np.radians(theta))+thick*np.sin(np.radians(theta)),
length*np.sin(np.radians(theta))-thick*np.cos(np.radians(theta))],
[-length*np.cos(np.radians(theta))+thick*np.sin(np.radians(theta)),
-length*np.sin(np.radians(theta))-thick*np.cos(np.radians(theta))],
[-length*np.cos(np.radians(theta))-thick*np.sin(np.radians(theta)),
-length*np.sin(np.radians(theta))+thick*np.cos(np.radians(theta))],
[0, 0]]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=2, alpha=0.75)
axes[1].add_patch(patch)
axes[1].text((1+length)*np.cos(np.radians(theta))-thick*np.sin(np.radians(theta)),
(1+length)*np.sin(np.radians(theta))+thick*np.cos(np.radians(theta)),
str(i), fontsize=15)
x1 = 10
dy = 0.1
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
fig, axes = plt.subplots(2, 5, figsize=(20, 4))
axes = axes.flatten()
for i, theta in enumerate(xrange(0, 180, 18)):
verts = [
[x1*np.cos(np.radians(theta))-dy*np.sin(np.radians(theta)),
x1*np.sin(np.radians(theta))+dy*np.cos(np.radians(theta))],
[x1*np.cos(np.radians(theta))+dy*np.sin(np.radians(theta)),
x1*np.sin(np.radians(theta))-dy*np.cos(np.radians(theta))],
[-x1*np.cos(np.radians(theta))+dy*np.sin(np.radians(theta)),
-x1*np.sin(np.radians(theta))-dy*np.cos(np.radians(theta))],
[-x1*np.cos(np.radians(theta))-dy*np.sin(np.radians(theta)),
-x1*np.sin(np.radians(theta))+dy*np.cos(np.radians(theta))],
[0, 0]]
path = Path(verts, codes)
within_box = path.contains_points(np.array([X.flatten(), Y.flatten()]).T)
axes[i].plot(X.flatten()[within_box], mom1.flatten()[within_box], 'b.')
axes[i].set_title(str(i))
plt.tight_layout()
plt.show()
x1 = 10
dy = 0.1
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
fig, axes = plt.subplots(2, 5, figsize=(20, 4))
axes = axes.flatten()
for i, y in enumerate(xrange(-10, 10, 2)):
verts = [
[x1, dy+y],
[x1, -dy+y],
[-x1, -dy+y],
[-x1, dy+y],
[0, 0]]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', lw=2)
#axes[i].add_patch(patch)
#axes[i].set_xlim(-20,20)
#axes[i].set_ylim(-20,20)
within_box = path.contains_points(np.array([posx, posy]).T)
axes[i].plot(posx[within_box[::10]], vely[within_box[::10]], 'b.')
plt.show()
#fig = plt.figure()
#ax = fig.add_subplot(111)
#patch = patches.PathPatch(path, facecolor='none', lw=2)
#ax.add_patch(patch)
#ax.set_xlim(-20,20)
#ax.set_ylim(-20,20)
#plt.show()
```
----
<a href="https://colab.research.google.com/github/Ciiku-Kihara/LOAN-APPROVAL-PROJECT/blob/main/THE_LOAN_APPROVAL_PROJECT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## A CASE STUDY OF FACTORS AFFECTING LOAN APPROVAL
## 1. Defining the question
### a) Specifying the analysis question
Is there a relationship between gender, credit history, and the area one lives in, and loan status?
### b) Defining the metric for success
To be able to run statistically sound hypothesis tests and come to a meaningful conclusion.
### c) Understanding the context
In finance, a loan is the lending of money by one or more individuals, organizations, or other entities to other individuals and organizations.
Borrowing a loan builds your confidence in securing future loans. If you repay your loan well, you will have a good credit history and stand a better chance of getting more loans. Borrowing is important: it helps when you don't have cash on hand and can be of great help whenever you are in a fix.
### d) Recording the experimental design
We will conduct exploratory data analysis, which includes univariate, bivariate, and multivariate analysis.
To answer our research question, we will carry out hypothesis testing using the Chi-square test to assess the relationships and differences between our independent variables and the target variable, and draw significant conclusions.
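As an illustration of the planned testing, a Chi-square test of independence between one categorical predictor and loan status can be run on a contingency table. A minimal sketch (it assumes the cleaned `loans_df` built in the data-cleaning section below and uses `scipy.stats.chi2_contingency`):
```
# Sketch: Chi-square test of independence between gender and loan status
from scipy.stats import chi2_contingency

# Build the contingency table of observed counts
contingency = pd.crosstab(loans_df['gender'], loans_df['loanstatus'])
chi2, p_value, dof, expected = chi2_contingency(contingency)
print(f"chi2 = {chi2:.3f}, p-value = {p_value:.4f}, dof = {dof}")
# Reject the null hypothesis of independence if p_value < 0.05
```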
### e) Data Relevance
The dataset contains demographic information on factors that determine whether one gets a loan or not.
This data was extracted from Kaggle, which is a reputable organization.
The information contained in our dataset was relevant for our analysis.
## 2. Importing relevant libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import f_oneway
from scipy.stats import ttest_ind
import scipy.stats as stats
from sklearn.decomposition import PCA
```
## 3. Loading and checking the data
```
# Loading our dataset
loans_df = pd.read_csv('loans.csv')
# Getting a preview of the first 10 rows
loans_df.head(10)
# Determining the number of rows and columns in the dataset
loans_df.shape
# Determining the names of the columns present in the dataset
loans_df.columns
# Description of the quantitative columns
loans_df.describe()
# Description of the qualitative columns
loans_df.describe(include = 'object')
# Checking if each column is of the appropriate data type
loans_df.info()
```
## 4. External data source validation
> We validated our dataset using information from the following link:
> http://calcnet.mth.cmich.edu/org/spss/prj_loan_data.htm
## 5. Data cleaning
Uniformity
```
# Changing all column names to lowercase, stripping white spaces
# and removing all underscores
loans_df.columns = loans_df.columns.str.lower().str.strip().str.replace("_","")
# Confirming the changes made
loans_df.head(5)
```
Data Completeness
```
# Determining the number of null values in each column
loans_df.isnull().sum()
#Imputing Loan Amount with mean
loans_df['loanamount'] = loans_df['loanamount'].fillna(loans_df['loanamount'].mean())
#Forward fill for LoanTerm
loans_df['loanamountterm'] = loans_df['loanamountterm'].fillna(method = "ffill")
#Assuming Missing values imply bad credit History - replacing nulls with 0
loans_df['credithistory'] = loans_df['credithistory'].fillna(0)
#Imputing gender, married, and selfemployed
loans_df['dependents']=loans_df['dependents'].fillna(loans_df['dependents'].mode()[0])
loans_df['gender']=loans_df['gender'].fillna(loans_df['gender'].mode()[0])
loans_df['married']=loans_df['married'].fillna(loans_df['married'].mode()[0])
loans_df['selfemployed']=loans_df['selfemployed'].fillna(loans_df['selfemployed'].mode()[0])
# Confirming our changes after dealing with null values
loans_df.isnull().sum()
# Previewing the data
loans_df.head(10)
```
Data Consistency
```
# Checking if there are any duplicated rows
loans_df.duplicated().sum()
# Checking for any anomalies in the qualitative variables
qcol = ['gender', 'married', 'dependents', 'education',
'selfemployed','credithistory', 'propertyarea', 'loanstatus']
for col in qcol:
print(col, ':', loans_df[col].unique())
#Checking for Outliers
cols = ['applicantincome','coapplicantincome', 'loanamount', 'loanamountterm']
for column in cols:
plt.figure()
loans_df.boxplot([column], fontsize= 12)
plt.ylabel('count', fontsize = 12)
plt.title('Boxplot - {}'.format(column), fontsize = 16)
# Determining how many rows would be lost if outliers were removed
# Calculating our first, third quantiles and then later our IQR
# ---
Q1 = loans_df.quantile(0.25)
Q3 = loans_df.quantile(0.75)
IQR = Q3 - Q1
# Removing outliers based on the IQR range and storing the result in the data frame 'loans_df_new'
# ---
#
loans_df_new = loans_df[~((loans_df < (Q1 - 1.5 * IQR)) | (loans_df > (Q3 + 1.5 * IQR))).any(axis=1)]
# Printing the shape of our new dataset
# ---
#
print(loans_df_new.shape)
# Printing the shape of our old dataset
# ---
#
print(loans_df.shape)
# Number of rows removed
rows_removed = loans_df.shape[0] - loans_df_new.shape[0]
rows_removed
# Percentage of rows that would be removed
row_percent = (rows_removed/loans_df.shape[0]) * 100
row_percent
# Exporting our data
loans_df.to_csv('loanscleaned.csv')
```
## 6. Exploratory Data Analysis
### a) Univariate Analysis
```
# Previewing the dataset
loans_df.head(4)
# Loan Status
Yes = loans_df[loans_df["loanstatus"] == 'Y'].shape[0]
No = loans_df[loans_df["loanstatus"] == 'N'].shape[0]
print(f"Yes = {Yes}")
print(f"No = {No}")
print(' ')
print(f"Proportion of Yes = {(Yes / len(loans_df['loanstatus'])) * 100:.2f}%")
print(f"Proportion of No = {(No / len(loans_df['loanstatus'])) * 100:.2f}%")
print(' ')
plt.figure(figsize=(10, 8))
sns.countplot(x = loans_df["loanstatus"])
plt.xticks((0, 1), ["Yes", "No"], fontsize = 14)
plt.xlabel("Loan Approval Status", fontsize = 14)
plt.ylabel("Frequency", fontsize = 14)
plt.title("Number of Approved and Disapproved Loans", y=1, fontdict={"fontsize": 20});
# Pie Chart for Gender
gender = loans_df.gender.value_counts()
plt.figure(figsize= (8,5), dpi=100)
# Exploding the first (largest) slice for emphasis
explode = (0.1, 0)
colors = ['blue', 'orange']
# Plotting our pie chart
gender.plot.pie(explode = explode, colors = colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.title('Pie chart of Gender Distribution')
plt.show()
# Pie Chart for Education
education = loans_df.education.value_counts()
plt.figure(figsize= (8,5), dpi=100)
# Exploding the first (largest) slice for emphasis
explode = (0.1, 0)
colors = ['blue', 'orange']
# Plotting our pie chart
education.plot.pie(explode = explode, colors = colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.title('Pie chart of Education')
plt.show()
# Marital status
Yes = loans_df[loans_df["married"] == 'Yes'].shape[0]
No = loans_df[loans_df["married"] == 'No'].shape[0]
print(f"Yes = {Yes}")
print(f"No = {No}")
print(' ')
print(f"Proportion of Yes = {(Yes / len(loans_df['married'])) * 100:.2f}%")
print(f"Proportion of No = {(No / len(loans_df['married'])) * 100:.2f}%")
print(' ')
plt.figure(figsize=(10, 8))
sns.countplot(x = loans_df["married"])
plt.xticks((0, 1), ["No", "Yes"], fontsize = 14)
plt.xlabel("Marital Status", fontsize = 14)
plt.ylabel("Frequency", fontsize = 14)
plt.title("Marital Status", y=1, fontdict={"fontsize": 20});
# Frequency table for Property Area in percentage
round(loans_df.propertyarea.value_counts(normalize = True),2)
# Pie Chart for Credit History
credit = loans_df.credithistory.value_counts()
plt.figure(figsize= (8,5), dpi=100)
# Exploding the first (largest) slice for emphasis
explode = (0.1, 0)
colors = ['blue', 'orange']
# Plotting our pie chart
credit.plot.pie(explode = explode, colors = colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.title('Pie chart of Credit History')
plt.show()
# Frequency table for Self Employed status in percentage
round(loans_df.selfemployed.value_counts(normalize = True),2)
# Frequency table for Dependents in percentage
round(loans_df.dependents.value_counts(normalize = True),2)
# Histogram for Applicant Income
def histogram(var1, bins):
plt.figure(figsize= (10,8)),
sns.set_style('darkgrid'),
sns.set_palette('colorblind'),
sns.histplot(x = var1, data=loans_df, bins = bins , shrink= 0.9, kde = True)
histogram('applicantincome', 50)
plt.title('Histogram of the Applicant Income', fontsize = 16)
plt.xlabel('Applicant Income', fontsize = 14)
plt.ylabel('Count', fontsize = 14)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Checking the coefficient of variation, skewness and kurtosis
print('The skewness is:', loans_df['applicantincome'].skew())
print('The kurtosis is:', loans_df['applicantincome'].kurt())
print('The coefficient of variation is:', loans_df['applicantincome'].std()/loans_df['applicantincome'].mean())
# Histogram for Loan Amount
histogram('loanamount', 50)
plt.title('Histogram of the Loan Amount Given', fontsize = 16)
plt.xlabel('Loan Amount', fontsize = 14)
plt.ylabel('Count', fontsize = 14)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Checking the coefficient of variation, skewness and kurtosis
print('The skewness is:', loans_df['loanamount'].skew())
print('The kurtosis is:', loans_df['loanamount'].kurt())
print('The coefficient of variation is:', loans_df['loanamount'].std()/loans_df['loanamount'].mean())
# Histogram for Co-applicant Income
histogram('coapplicantincome', 50)
plt.title('Histogram of the Co-applicant Income', fontsize = 16)
plt.xlabel('Co-applicant Income', fontsize = 14)
plt.ylabel('Count', fontsize = 14)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Checking the coefficient of variation, skewness and kurtosis
print('The skewness is:', loans_df['coapplicantincome'].skew())
print('The kurtosis is:', loans_df['coapplicantincome'].kurt())
print('The coefficient of variation is:', loans_df['coapplicantincome'].std()/loans_df['coapplicantincome'].mean())
# Looking at the unique values of the loan amount term
loans_df.loanamountterm.unique()
# Measures of central tendency for our quantitative variables
loans_df.describe()
```
### b) Bivariate Analysis
```
# Preview of dataset
loans_df.head(3)
# Comparison of Self employment Status and Loan Status
table=pd.crosstab(loans_df['selfemployed'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', figsize= (10,8), stacked=False)
plt.title('Bar Chart of Self Employed Status vs Loan Status', fontsize = 16)
plt.xlabel('Self Employed', fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Comparison of Education and Loan Status
table=pd.crosstab(loans_df['education'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', figsize = (10,8), stacked=False)
plt.title('Bar Chart of Education vs Loan Status', fontsize = 16)
plt.xlabel('Education', fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Comparison of Gender and Loan Status
table=pd.crosstab(loans_df['gender'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar',figsize = (10,8), stacked=False)
plt.title('Bar Chart of Gender to loanstatus', fontsize = 16)
plt.xlabel('Gender', fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Comparison of Marital Status and Loan Status
table=pd.crosstab(loans_df['married'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', figsize = (10,8), stacked=False)
plt.title('Bar Chart of Marital Status to Loan Status', fontsize = 16)
plt.xlabel('Marital Status',fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Comparison of Credit History and Loan Status
table=pd.crosstab(loans_df['credithistory'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', figsize = (10,8), stacked=False)
plt.title('Bar Chart of Credit History and Loanstatus', fontsize = 16)
plt.xlabel('Credit History', fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Comparison of Property Area and Loan Status
table=pd.crosstab(loans_df['propertyarea'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', figsize = (10,8), stacked=False)
plt.title('Bar Chart of Area and Loan Status', fontsize = 16)
plt.xlabel('Area', fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Comparison of Dependents and Loan Status
table=pd.crosstab(loans_df['dependents'],loans_df['loanstatus'])
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', figsize = (10,8), stacked=False)
plt.title('Bar Chart of Dependents and Loan Status', fontsize = 16)
plt.xlabel('Dependents', fontsize = 14)
plt.ylabel('Proportion of Respondents', fontsize = 14)
plt.xticks(rotation = 360, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
#Scatterplot to show correlation between Applicant Income and Loan amount
plt.figure(figsize= (10,8))
sns.scatterplot(x= loans_df.applicantincome, y = loans_df.loanamount)
plt.title('Applicant Income Vs Loan Amount', fontsize = 16)
plt.ylabel('Loan Amount', fontsize=14)
plt.xlabel('Applicant Income', fontsize=14)
plt.xticks(rotation = 75, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Correlation coefficient between applicant income and loan amount
loans_df['applicantincome'].corr(loans_df['loanamount'])
#Scatterplot to show correlation between Co-Applicant Income and Loan amount
plt.figure(figsize= (10,8))
sns.scatterplot(x= loans_df.coapplicantincome, y = loans_df.loanamount)
plt.title('Co-Applicant Income Vs Loan Amount', fontsize = 16)
plt.ylabel('Loan Amount', fontsize=14)
plt.xlabel('Co-Applicant Income', fontsize=14)
plt.xticks(rotation = 75, fontsize = 14)
plt.yticks(fontsize = 14)
plt.show()
# Correlation coefficient between loan amount and co-applicant income
loans_df['coapplicantincome'].corr(loans_df['loanamount'])
# Scatterplot of applicant income vs loan amount for a random sample of
# 200 rows where the co-applicant income is below 2000
loans_df[loans_df['coapplicantincome'] < 2000].sample(200).plot.scatter(x='applicantincome', y='loanamount')
# Correlation Heatmap
plt.figure(figsize=(7,4))
sns.heatmap(loans_df.corr(),annot=True,cmap='cubehelix_r')
plt.show()
```
### c) Multivariate Analysis
```
# Analysis of Loan Status, Applicant income and Loan Amount
plt.figure(figsize=(10,8))
sns.scatterplot(x= loans_df['loanamount'], y=loans_df['applicantincome'], hue= loans_df['loanstatus'])
plt.title('Loan Amount vs Applicant Income vs Loan Status', fontsize = 16)
plt.xlabel('Loan Amount', fontsize = 14)
plt.ylabel('Applicant Income', fontsize = 14)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
# Analysis of Loan Status, Applicant income and Credit History
plt.figure(figsize=(10,8))
sns.scatterplot(x= loans_df['loanamount'], y=loans_df['applicantincome'], hue= loans_df['credithistory'])
plt.title('Loan Amount vs Applicant Income vs Credit History', fontsize = 16)
plt.xlabel('Loan Amount', fontsize = 14)
plt.ylabel('Applicant Income', fontsize = 14)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
```
## 7. Hypothesis testing
- The Chi-square test will be used for all hypothesis tests in our analysis.
- The level of significance to be used in all tests below will be 0.05 or 5%
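For reference, the test statistic compares the observed counts $O_i$ in each cell of the contingency table with the expected counts $E_i$ under independence:

$$\chi^2 = \sum_i \frac{(O_i - E_i)^2}{E_i},$$

and we reject $H_0$ when $\chi^2 \ge \chi^2_{1-\alpha,\,dof}$ (equivalently, when the p-value is below $\alpha = 0.05$).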
**Hypothesis 1:**
Ho : There is no relationship between credit history and the loan status
Ha : There is a relationship between credit history and the loan status
```
# Creating a crosstab
tab = pd.crosstab(loans_df['loanstatus'], loans_df['credithistory'])
tab
# Obtaining the observed values
observed_values = tab.values
print('Observed values: -\n', observed_values)
# Creating the chi square contingency table
val = stats.chi2_contingency(tab)
val
# Obtaining the expected values
expected_values = val[3]
expected_values
# Obtaining the degrees of freedom
rows = len(tab.iloc[0:2, 0])
columns = len(tab.iloc[0, 0:2])
dof = (rows-1)*(columns-1)
print('Degrees of Freedom', dof)
# Obtaining the chi-square statistic
chi_square =sum([(o-e)**2./e for o,e in zip(observed_values,expected_values)])
chi_square
chi_square_statistic = chi_square[0]+chi_square[1]
chi_square_statistic
# Getting the critical value
alpha = 0.05
critical_value = stats.chi2.ppf(q = 1-alpha, df = dof)
print('Critical Value:', critical_value)
# Getting p value
p_value = 1 - stats.chi2.cdf(x = chi_square_statistic, df= dof)
p_value
# Conclusion
if chi_square_statistic>=critical_value:
print('Reject Null Hypothesis')
else:
print('Do not Reject Null Hypothesis')
```
The chi-square statistic is greater than the critical value hence we reject the null hypothesis that there is no relationship between credit history and loan status
At 5% level of significance, there is enough evidence to conclude that there is a relationship between credit history and loan status
**Hypothesis 2 :**
Ho : There is no relationship between area and the loan status
Ha : There is a relationship between area and the loan status
```
# Creating a crosstab
tab = pd.crosstab(loans_df['loanstatus'], loans_df['propertyarea'])
tab
# Obtaining the observed values
observed_values = tab.values
print('Observed values: -\n', observed_values)
# Creating the chi square contingency table
val = stats.chi2_contingency(tab)
val
# Obtaining the expected values
expected_values = val[3]
expected_values
# Obtaining the degrees of freedom (propertyarea has three categories)
rows, columns = tab.shape
dof = (rows-1)*(columns-1)
print('Degrees of Freedom', dof)
# Obtaining the chi-square statistic (summed over all cells)
chi_square = sum([(o-e)**2./e for o,e in zip(observed_values, expected_values)])
chi_square
chi_square_statistic = chi_square.sum()
chi_square_statistic
# Getting the critical value
alpha = 0.05
critical_value = stats.chi2.ppf(q = 1-alpha, df = dof)
print('Critical Value:', critical_value)
# Getting p value
p_value = 1 - stats.chi2.cdf(x = chi_square_statistic, df= dof)
p_value
# Conclusion
if chi_square_statistic>=critical_value:
print('Reject Null Hypothesis')
else:
print('Do not Reject Null Hypothesis')
```
The chi-square statistic is greater than the critical value hence we reject the null hypothesis that there is no relationship between area and loan status
At the 5% level of significance, there is enough evidence to conclude that there is a relationship between property area and loan status.
**Hypothesis 3 :**
Ho : There is no relationship between gender and the loan status
Ha : There is a relationship between gender and the loan status
```
# Creating a crosstab
tab = pd.crosstab(loans_df['loanstatus'], loans_df['gender'])
tab
# Obtaining the observed values
observed_values = tab.values
print('Observed values: -\n', observed_values)
# Creating the chi square contingency table
val = stats.chi2_contingency(tab)
val
# Obtaining the expected values
expected_values = val[3]
expected_values
# Obtaining the degrees of freedom
rows = len(tab.iloc[0:2, 0])
columns = len(tab.iloc[0, 0:2])
dof = (rows-1)*(columns-1)
print('Degrees of Freedom', dof)
# Obtaining the chi-square statistic
chi_square =sum([(o-e)**2./e for o,e in zip(observed_values,expected_values)])
chi_square
chi_square_statistic = chi_square[0]+chi_square[1]
chi_square_statistic
# Getting the critical value
alpha = 0.05
critical_value = stats.chi2.ppf(q = 1-alpha, df = dof)
print('Critical Value:', critical_value)
# Getting p value
p_value = 1 - stats.chi2.cdf(x = chi_square_statistic, df= dof)
p_value
# Conclusion
if chi_square_statistic>=critical_value:
print('Reject Null Hypothesis')
else:
print('Do not Reject Null Hypothesis')
```
The chi-square statistic is less than the critical value, hence we do not reject the null hypothesis that there is no relationship between gender and loan status.
At the 5% level of significance, there is not enough evidence to conclude that there is a relationship between gender and loan status.
## 8. Dimensionality reduction
```
# PCA analysis with One Hot Encoding
dummy_Gender = pd.get_dummies(loans_df['gender'], prefix = 'Gender')
dummy_Married = pd.get_dummies(loans_df['married'], prefix = "Married")
dummy_Education = pd.get_dummies(loans_df['education'], prefix = "Education")
dummy_Self_Employed = pd.get_dummies(loans_df['selfemployed'], prefix = "Selfemployed")
dummy_Property_Area = pd.get_dummies(loans_df['propertyarea'], prefix = "Property")
dummy_Dependents = pd.get_dummies(loans_df['dependents'], prefix = "Dependents")
dummy_Loan_status = pd.get_dummies(loans_df['loanstatus'], prefix = "Approve")
# Creating a list of our dummy data
frames = [loans_df,dummy_Gender,dummy_Married,dummy_Education,dummy_Self_Employed,dummy_Property_Area,dummy_Dependents,dummy_Loan_status]
# Combining the dummy data with our dataframe
df_train = pd.concat(frames, axis = 1)
# Previewing our training dataset
df_train.head(10)
# Dropping of non-numeric columns as part of pre-processing
df_train = df_train.drop(columns = ['loanid', 'gender', 'married', 'dependents', 'education','selfemployed', 'propertyarea','loanstatus','Approve_N'])
# Previewing the final dataset for our analysis
df_train
# Preprocessing
X=df_train.drop(['Approve_Y'],axis=1)
y=df_train['Approve_Y']
# Splitting into training and test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Standardizing the features
# Note: the dependents column contains the string value '3+', which is why it was
# one-hot encoded above rather than treated as a numeric column
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=6)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
# Obtaining the explained variance ratio which returns the variance caused by each of the principal components.
# We execute the following line of code to find the "explained variance ratio"
explained_variance = pca.explained_variance_ratio_
explained_variance
# Plotting our scree plot
plt.plot(pca.explained_variance_ratio_)
plt.xlabel('Number of components', fontsize = 14)
plt.ylabel('Explained variance', fontsize = 14)
plt.title('Scree Plot', fontsize = 16)
plt.show()
# Training and Making Predictions
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Performance Evaluation
#
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
print('Accuracy' , accuracy_score(y_test, y_pred))
```
## 9. Challenging the solution
Beyond the baseline random forest trained on the PCA components in section 8, a more carefully tuned classification model could accompany the hypothesis tests and strengthen the analysis; this would require deeper machine-learning work.
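As a hedged sketch of what such a baseline could look like (a simple logistic regression on the one-hot features `X`, `y` built in section 8; illustrative only, not part of the original analysis):
```
# Hypothetical baseline: logistic regression on the one-hot encoded features
# X, y from section 8. Illustrative sketch only.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
log_reg = LogisticRegression(max_iter=1000)
log_reg.fit(X_tr, y_tr)
print(classification_report(y_te, log_reg.predict(X_te)))
```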
## 10. Follow-up questions
At this point, we can refine our question or collect new data, all in an iterative process to get at the truth.
### a) Did we have the right data?
> Yes. The data was relevant in context to our research.
### b) Do we need other data to answer the question?
> The data was adequate, but collecting more data of the same kind would strengthen the analysis.
### c) Did we have the right question?
> Yes
## 11. Conclusions and Recommendations
> From our exploratory data analysis and the statistical tests above, we can draw the following conclusions.
> - Although outliers account for a substantial 42% of the rows, we deemed it best not to drop them: in loan data extreme values are often genuine, and removing that many rows would discard too much information.
- 69% of the loans were approved and 31% were disapproved.
- Males apply for far more loans than females (82% vs. 18% of applicants), and the same holds for graduates, who dominate the applications. This raises the question of whether graduates are underpaid or need the loans to fund other things such as investments.
- There was no significant difference in approval counts between self-employed and non-self-employed applicants, which suggests that loan approval depends more on applicant income.
- About 78% of loans were approved for applicants with a good credit history.
- A high number of approved loans went to applicants from semi-urban areas, which may suggest higher incomes in those areas.
- The number of dependents affects an applicant's approval rate; interestingly, applicants with 2 dependents had the most approvals.
- Credit history has a significantly strong relationship with loan approval.
- There is a relationship between property area and loan status.
- Despite more males applying for loans, gender does not affect whether a loan is approved.
- Applicants earning between 0 and 20,000 go for smaller loan amounts. If the lender offers good rates on these, it can make good profits since smaller loans have shorter repayment periods.
We recommend that the lender prioritize applicants with a good credit history and high incomes.
Lenders should also improve their data collection methods to capture more data so that models can make more accurate predictions.
# Dependencies
```
import os
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
# Set seeds to make the experiment more reproducible.
from tensorflow import set_random_seed
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
    set_random_seed(seed)
seed_everything()
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
```
# Load data
```
train = pd.read_csv('../input/aptos2019-blindness-detection/train.csv')
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', train.shape[0])
print('Number of test samples: ', test.shape[0])
# Preprocecss data
train["id_code"] = train["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
train['diagnosis'] = train['diagnosis'].astype('str')
display(train.head())
```
# Model parameters
```
# Model parameters
BATCH_SIZE = 8
EPOCHS = 30
WARMUP_EPOCHS = 2
LEARNING_RATE = 1e-4
WARMUP_LEARNING_RATE = 1e-3
HEIGHT = 512
WIDTH = 512
CANAL = 3
N_CLASSES = train['diagnosis'].nunique()
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
def kappa(y_true, y_pred, n_classes=5):
y_trues = K.cast(K.argmax(y_true), K.floatx())
y_preds = K.cast(K.argmax(y_pred), K.floatx())
n_samples = K.cast(K.shape(y_true)[0], K.floatx())
distance = K.sum(K.abs(y_trues - y_preds))
max_distance = n_classes - 1
kappa_score = 1 - ((distance**2) / (n_samples * (max_distance**2)))
return kappa_score
```
# Train test split
```
X_train, X_val = train_test_split(train, test_size=0.25, random_state=0)
```
# Data generator
```
# Note: zca_whitening requires calling train_datagen.fit() on a sample of images;
# without that fit step Keras cannot apply the whitening.
train_datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
brightness_range=[0.5, 1.5],
zoom_range=[1, 1.2],
zca_whitening=True,
horizontal_flip=True,
vertical_flip=True,
fill_mode='constant',
cval=0.)
train_generator=train_datagen.flow_from_dataframe(
dataframe=X_train,
directory="../input/aptos2019-blindness-detection/train_images/",
x_col="id_code",
y_col="diagnosis",
batch_size=BATCH_SIZE,
class_mode="categorical",
target_size=(HEIGHT, WIDTH))
valid_generator=train_datagen.flow_from_dataframe(
dataframe=X_val,
directory="../input/aptos2019-blindness-detection/train_images/",
x_col="id_code",
y_col="diagnosis",
batch_size=BATCH_SIZE,
class_mode="categorical",
target_size=(HEIGHT, WIDTH))
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_dataframe(
dataframe=test,
directory = "../input/aptos2019-blindness-detection/test_images/",
x_col="id_code",
target_size=(HEIGHT, WIDTH),
batch_size=1,
shuffle=False,
class_mode=None)
```
# Model
```
def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = applications.ResNet50(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.5)(x)
final_output = Dense(n_out, activation='softmax', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
model = create_model(input_shape=(HEIGHT, WIDTH, CANAL), n_out=N_CLASSES)
for layer in model.layers:
layer.trainable = False
for i in range(-5, 0):
model.layers[i].trainable = True
class_weights = class_weight.compute_class_weight('balanced', np.unique(train['diagnosis'].astype('int').values), train['diagnosis'].astype('int').values)
metric_list = ["accuracy", kappa]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=metric_list)
model.summary()
```
# Train top layers
```
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
class_weight=class_weights,
verbose=1).history
```
# Fine-tune the complete model
```
for layer in model.layers:
layer.trainable = True
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
callback_list = [es, rlrop]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=metric_list)
model.summary()
history_finetunning = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callback_list,
class_weight=class_weights,
verbose=1).history
```
# Model loss graph
```
history = {'loss': history_warmup['loss'] + history_finetunning['loss'],
'val_loss': history_warmup['val_loss'] + history_finetunning['val_loss'],
'acc': history_warmup['acc'] + history_finetunning['acc'],
'val_acc': history_warmup['val_acc'] + history_finetunning['val_acc'],
'kappa': history_warmup['kappa'] + history_finetunning['kappa'],
'val_kappa': history_warmup['val_kappa'] + history_finetunning['val_kappa']}
sns.set_style("whitegrid")
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col', figsize=(20, 18))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
ax3.plot(history['kappa'], label='Train kappa')
ax3.plot(history['val_kappa'], label='Validation kappa')
ax3.legend(loc='best')
ax3.set_title('Kappa')
plt.xlabel('Epochs')
sns.despine()
plt.show()
```
# Model Evaluation
```
lastFullTrainPred = np.empty((0, N_CLASSES))
lastFullTrainLabels = np.empty((0, N_CLASSES))
lastFullValPred = np.empty((0, N_CLASSES))
lastFullValLabels = np.empty((0, N_CLASSES))
for i in range(STEP_SIZE_TRAIN+1):
im, lbl = next(train_generator)
scores = model.predict(im, batch_size=train_generator.batch_size)
lastFullTrainPred = np.append(lastFullTrainPred, scores, axis=0)
lastFullTrainLabels = np.append(lastFullTrainLabels, lbl, axis=0)
for i in range(STEP_SIZE_VALID+1):
im, lbl = next(valid_generator)
scores = model.predict(im, batch_size=valid_generator.batch_size)
lastFullValPred = np.append(lastFullValPred, scores, axis=0)
lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0)
```
# Threshold optimization
```
def find_best_fixed_threshold(preds, targs, do_plot=True):
best_thr_list = [0 for i in range(preds.shape[1])]
for index in reversed(range(1, preds.shape[1])):
score = []
thrs = np.arange(0, 1, 0.01)
for thr in thrs:
preds_thr = [index if x[index] > thr else np.argmax(x) for x in preds]
score.append(cohen_kappa_score(targs, preds_thr))
score = np.array(score)
pm = score.argmax()
best_thr, best_score = thrs[pm], score[pm].item()
best_thr_list[index] = best_thr
        print(f'thr={best_thr:.3f}', f'kappa={best_score:.3f}')
if do_plot:
plt.plot(thrs, score)
plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max())
plt.text(best_thr+0.03, best_score-0.01, ('Kappa[%s]=%.3f'%(index, best_score)), fontsize=14);
plt.show()
return best_thr_list
lastFullComPred = np.concatenate((lastFullTrainPred, lastFullValPred))
lastFullComLabels = np.concatenate((lastFullTrainLabels, lastFullValLabels))
complete_labels = [np.argmax(label) for label in lastFullComLabels]
threshold_list = find_best_fixed_threshold(lastFullComPred, complete_labels, do_plot=True)
threshold_list[0] = 0 # Class 0 acts as the fallback label when no other threshold is exceeded
train_preds = [np.argmax(pred) for pred in lastFullTrainPred]
train_labels = [np.argmax(label) for label in lastFullTrainLabels]
validation_preds = [np.argmax(pred) for pred in lastFullValPred]
validation_labels = [np.argmax(label) for label in lastFullValLabels]
train_preds_opt = [0 for i in range(lastFullTrainPred.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(lastFullTrainPred):
if pred[idx] > thr:
train_preds_opt[idx2] = idx
validation_preds_opt = [0 for i in range(lastFullValPred.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(lastFullValPred):
if pred[idx] > thr:
validation_preds_opt[idx2] = idx
```
## Confusion Matrix
```
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax2).set_title('Validation')
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
train_cnf_matrix = confusion_matrix(train_labels, train_preds_opt)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds_opt)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train optimized')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax2).set_title('Validation optimized')
plt.show()
```
## Quadratic Weighted Kappa
```
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds+validation_preds, train_labels+validation_labels, weights='quadratic'))
print("Train optimized Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds_opt, train_labels, weights='quadratic'))
print("Validation optimized Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds_opt, validation_labels, weights='quadratic'))
print("Complete optimized set Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds_opt+validation_preds_opt, train_labels+validation_labels, weights='quadratic'))
```
# Apply model to test set and output predictions
```
test_generator.reset()
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
preds = model.predict_generator(test_generator, steps=STEP_SIZE_TEST)
predictions = [np.argmax(pred) for pred in preds]
predictions_opt = [0 for i in range(preds.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(preds):
if pred[idx] > thr:
predictions_opt[idx2] = idx
filenames = test_generator.filenames
results = pd.DataFrame({'id_code':filenames, 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
results_opt = pd.DataFrame({'id_code':filenames, 'diagnosis':predictions_opt})
results_opt['id_code'] = results_opt['id_code'].map(lambda x: str(x)[:-4])
```
# Predictions class distribution
```
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d", ax=ax1)
sns.countplot(x="diagnosis", data=results_opt, palette="GnBu_d", ax=ax2)
sns.despine()
plt.show()
val_kappa = cohen_kappa_score(validation_preds, validation_labels, weights='quadratic')
val_opt_kappa = cohen_kappa_score(validation_preds_opt, validation_labels, weights='quadratic')
if val_kappa > val_opt_kappa:
results_name = 'submission.csv'
results_opt_name = 'submission_opt.csv'
else:
results_name = 'submission_norm.csv'
results_opt_name = 'submission.csv'
results.to_csv(results_name, index=False)
results.head(10)
results_opt.to_csv(results_opt_name, index=False)
results_opt.head(10)
```
# Task: Predict User Item response under uniform exposure while learning from biased training data
Many current applications use recommendations in order to modify the natural user behavior, such as to increase the number of sales or the time spent on a website. This results in a gap between the final recommendation objective and the classical setup where recommendation candidates are evaluated by their coherence with past user behavior, by predicting either the missing entries in the user-item matrix, or the most likely next event. To bridge this gap, we optimize a recommendation policy for the task of increasing the desired outcome versus the organic user behavior. We show this is equivalent to learning to predict recommendation outcomes under a fully random recommendation policy. To this end, we propose a new domain adaptation algorithm that learns from logged data containing outcomes from a biased recommendation policy and predicts recommendation outcomes according to random exposure. We compare our method against state-of-the-art factorization methods and new approaches of causal recommendation and show significant improvements.
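In other words (a brief formalization using standard notation, added here as an assumption rather than taken from the notebook): the training log contains outcomes $y_{ui}$ for user-item pairs selected by a biased logging policy $\pi_0$, while the quantity we want to predict is the outcome under uniform random exposure,

$$\hat y_{ui} \;\approx\; \mathbb{E}\big[\,y_{ui} \mid u,\ i \sim \mathrm{Uniform}(\mathcal{I})\,\big], \qquad \text{with training pairs } (u,i) \sim \pi_0 .$$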
# Dataset
**MovieLens 100k dataset** was collected by the GroupLens Research Project at the University of Minnesota.
This data set consists of:
* 100,000 ratings (1-5) from 943 users on 1682 movies.
* Each user has rated at least 20 movies.
The data was collected through the MovieLens web site (movielens.umn.edu) during the seven-month period from September 19th, 1997 through April 22nd, 1998.
# Solution:
**Causal Matrix Factorization** - for more details see: https://arxiv.org/abs/1706.07639

# Metrics:
### * MSE - Mean Squared Error
### * NLL - Negative Log Likelihood
### * AUC - Area Under the Curve
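For labels $y_n \in \{0,1\}$ and predicted probabilities $\hat p_n$ these are computed as

$$\mathrm{MSE} = \frac{1}{N}\sum_{n=1}^{N}\big(y_n - \hat p_n\big)^2, \qquad \mathrm{NLL} = -\frac{1}{N}\sum_{n=1}^{N}\Big[y_n\log\hat p_n + (1-y_n)\log\big(1-\hat p_n\big)\Big],$$

and AUC is the probability that a randomly chosen positive example is scored above a randomly chosen negative one.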
-----------------------------
-----------------------------
# Questions:
### Q1: Add the definition for create_counterfactual_regularizer() method
### Q2: Compare the results of using variable values for cf_pen hyperparameter (0 vs. bigger)
### Q3: Compare different types of optimizers
### Q4: Push the performance as high as possible!
```
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
import os
import string
import tempfile
import time
import numpy as np
import matplotlib.pyplot as plt
import csv
import random
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tensorboard import summary as summary_lib
from __future__ import absolute_import
from __future__ import print_function
tf.set_random_seed(42)
tf.logging.set_verbosity(tf.logging.INFO)
print(tf.__version__)
# Hyper-Parameters
flags = tf.app.flags
tf.app.flags.DEFINE_string('f', '', 'kernel')
flags.DEFINE_string('data_set', 'user_prod_dict.skew.', 'Dataset string.') # Reg Skew
flags.DEFINE_string('adapt_stat', 'adapt_2i', 'Adapt String.') # Adaptation strategy
flags.DEFINE_string('model_name', 'cp2v', 'Name of the model for saving.')
flags.DEFINE_float('learning_rate', 1.0, 'Initial learning rate.')
flags.DEFINE_integer('num_epochs', 1, 'Number of epochs to train.')
flags.DEFINE_integer('num_steps', 100, 'Number of steps after which to test.')
flags.DEFINE_integer('embedding_size', 100, 'Size of each embedding vector.')
flags.DEFINE_integer('batch_size', 512, 'How big is a batch of training.')
flags.DEFINE_float('cf_pen', 10.0, 'Counterfactual regularizer hyperparam.')
flags.DEFINE_float('l2_pen', 0.0, 'L2 regularizer hyperparam.')
flags.DEFINE_string('cf_loss', 'l1', 'Use L1 or L2 for the loss .')
FLAGS = tf.app.flags.FLAGS
#_DATA_PATH = "/Users/f.vasile/MyFolders/MyProjects/1.MyPapers/2018_Q2_DS3_Course/code/cp2v/src/Data/"
_DATA_PATH = "./data/"
train_data_set_location = _DATA_PATH + FLAGS.data_set + "train." + FLAGS.adapt_stat + ".csv" # Location of train dataset
test_data_set_location = _DATA_PATH + FLAGS.data_set + "test." + FLAGS.adapt_stat + ".csv" # Location of the test dataset
validation_test_set_location = _DATA_PATH + FLAGS.data_set + "valid_test." + FLAGS.adapt_stat + ".csv" # Location of the validation dataset
validation_train_set_location = _DATA_PATH + FLAGS.data_set + "valid_train." + FLAGS.adapt_stat + ".csv" #Location of the validation dataset
model_name = FLAGS.model_name + ".ckpt"
print(train_data_set_location)
def calculate_vocab_size(file_location):
"""Calculate the total number of unique elements in the dataset"""
with open(file_location, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
useridtemp = []
productid = []
for row in reader:
useridtemp.append(row[0])
productid.append(row[1])
userid_size = len(set(useridtemp))
productid_size = len(set(productid))
return userid_size, productid_size
userid_size, productid_size = calculate_vocab_size(train_data_set_location) # Calculate the total number of unique elements in the dataset
print(str(userid_size))
print(str(productid_size))
plot_gradients = False # Plot the gradients
cost_val = []
tf.set_random_seed(42)
def load_train_dataset(dataset_location, batch_size, num_epochs):
"""Load the training data using TF Dataset API"""
with tf.name_scope('train_dataset_loading'):
record_defaults = [[1], [1], [0.]] # Sets the type of the resulting tensors and default values
# Dataset is in the format - UserID ProductID Rating
dataset = tf.data.TextLineDataset(dataset_location).map(lambda line: tf.decode_csv(line, record_defaults=record_defaults))
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
dataset = dataset.cache()
dataset = dataset.repeat(num_epochs)
iterator = dataset.make_one_shot_iterator()
user_batch, product_batch, label_batch = iterator.get_next()
label_batch = tf.expand_dims(label_batch, 1)
return user_batch, product_batch, label_batch
def load_test_dataset(dataset_location):
"""Load the test and validation datasets"""
user_list = []
product_list = []
labels = []
with open(dataset_location, 'r') as f:
reader = csv.reader(f)
for row in reader:
user_list.append(row[0])
product_list.append(row[1])
labels.append(row[2])
labels = np.reshape(labels, [-1, 1])
cr = compute_empirical_cr(labels)
return user_list, product_list, labels, cr
def compute_2i_regularization_id(prods, num_products):
"""Compute the ID for the regularization for the 2i approach"""
reg_ids = []
# Loop through batch and compute if the product ID is greater than the number of products
for x in np.nditer(prods):
if x >= num_products:
reg_ids.append(x)
elif x < num_products:
reg_ids.append(x + num_products) # Add number of products to create the 2i representation
return np.asarray(reg_ids)
def generate_bootstrap_batch(seed, data_set_size):
"""Generate the IDs for the bootstap"""
random.seed(seed)
ids = [random.randint(0, data_set_size-1) for j in range(int(data_set_size*0.8))]
return ids
def compute_empirical_cr(labels):
"""Compute the cr from the empirical data"""
labels = labels.astype(np.float)
clicks = np.count_nonzero(labels)
views = len(np.where(labels==0)[0])
cr = float(clicks)/float(views)
return cr
def create_average_predictor_tensors(label_list_placeholder, logits_placeholder):
"""Create the tensors required to run the averate predictor for the bootstraps"""
with tf.device('/cpu:0'):
with tf.variable_scope('ap_logits'):
ap_logits = tf.reshape(logits_placeholder, [tf.shape(label_list_placeholder)[0], 1])
with tf.name_scope('ap_losses'):
ap_mse_loss = tf.losses.mean_squared_error(labels=label_list_placeholder, predictions=ap_logits)
ap_log_loss = tf.losses.log_loss(labels=label_list_placeholder, predictions=ap_logits)
with tf.name_scope('ap_metrics'):
# Add performance metrics to the tensorflow graph
ap_correct_predictions = tf.equal(tf.round(ap_logits), label_list_placeholder)
ap_accuracy = tf.reduce_mean(tf.cast(ap_correct_predictions, tf.float32))
return ap_mse_loss, ap_log_loss
def compute_bootstraps_2i(sess, model, test_user_batch, test_product_batch, test_label_batch, test_logits, running_vars_initializer, ap_mse_loss, ap_log_loss):
"""Compute the bootstraps for the 2i model"""
data_set_size = len(test_user_batch)
mse = []
llh = []
ap_mse = []
ap_llh = []
auc_list = []
mse_diff = []
llh_diff = []
# Compute the bootstrap values for the test split - this compute the empirical CR as well for comparision
for i in range(30):
ids = generate_bootstrap_batch(i*2, data_set_size)
test_user_batch = np.asarray(test_user_batch)
test_product_batch = np.asarray(test_product_batch)
test_label_batch = np.asarray(test_label_batch)
# Reset the running variables used for the AUC
sess.run(running_vars_initializer)
# Construct the feed-dict for the model and the average predictor
feed_dict = {model.user_list_placeholder : test_user_batch[ids], model.product_list_placeholder: test_product_batch[ids], model.label_list_placeholder: test_label_batch[ids], model.logits_placeholder: test_logits[ids], model.reg_list_placeholder: test_product_batch[ids]}
# Run the model test step updating the AUC object
_, loss_val, mse_loss_val, log_loss_val = sess.run([model.auc_update_op, model.loss, model.mse_loss, model.log_loss], feed_dict=feed_dict)
auc_score = sess.run(model.auc, feed_dict=feed_dict)
# Run the Average Predictor graph
ap_mse_val, ap_log_val = sess.run([ap_mse_loss, ap_log_loss], feed_dict=feed_dict)
mse.append(mse_loss_val)
llh.append(log_loss_val)
ap_mse.append(ap_mse_val)
ap_llh.append(ap_log_val)
auc_list.append(auc_score)
for i in range(30):
mse_diff.append((ap_mse[i]-mse[i]) / ap_mse[i])
llh_diff.append((ap_llh[i]-llh[i]) / ap_llh[i])
print("MSE Mean Score On The Bootstrap = ", np.mean(mse))
print("MSE Mean Lift Over Average Predictor (%) = ", np.round(np.mean(mse_diff)*100, decimals=2))
print("MSE STD (%) =" , np.round(np.std(mse_diff)*100, decimals=2))
print("LLH Mean Over Average Predictor (%) =", np.round(np.mean(llh_diff)*100, decimals=2))
print("LLH STD (%) = ", np.round(np.std(llh_diff)*100, decimals=2))
print("Mean AUC Score On The Bootstrap = ", np.round(np.mean(auc_list), decimals=4), "+/-", np.round(np.std(auc_list), decimals=4))
```
### About Supervised Prod2vec
- Class to define MF of the implicit feedback matrix (1/0/unk) of Users x Products
- When called it creates the TF graph for the associated NN:
Step1: self.create_placeholders() => Creates the input placeholders
Step2: self.build_graph() => Creates the 3 layers:
- the user embedding layer
- the product embedding layer
- the output prediction layer
Step3: self.create_losses() => Defines the loss function for prediction
Step4: self.add_optimizer() => Defines the optimizer
Step5: self.add_performance_metrics() => Defines the logged performance metrics (accuracy, AUC)
Step6: self.add_summaries() => Adds the TensorBoard summaries (losses, weights, biases)
```
class SupervisedProd2vec():
def __init__(self, userid_size, productid_size, embedding_size, l2_pen, learning_rate):
self.userid_size = userid_size
self.productid_size = productid_size
self.embedding_size = embedding_size
self.l2_pen = l2_pen
self.learning_rate = learning_rate
# Build the graph
self.create_placeholders()
self.build_graph()
self.create_losses()
self.add_optimizer()
self.add_performance_metrics()
self.add_summaries()
def create_placeholders(self):
"""Create the placeholders to be used """
self.user_list_placeholder = tf.placeholder(tf.int32, [None], name="user_list_placeholder")
self.product_list_placeholder = tf.placeholder(tf.int32, [None], name="product_list_placeholder")
self.label_list_placeholder = tf.placeholder(tf.float32, [None, 1], name="label_list_placeholder")
# logits placeholder used to store the test CR for the bootstrapping process
self.logits_placeholder = tf.placeholder(tf.float32, [None], name="logits_placeholder")
def build_graph(self):
"""Build the main tensorflow graph with embedding layers"""
with tf.name_scope('embedding_layer'):
# User matrix and current batch
self.user_embeddings = tf.get_variable("user_embeddings", shape=[self.userid_size, self.embedding_size], initializer=tf.contrib.layers.xavier_initializer(), trainable=True)
self.user_embed = tf.nn.embedding_lookup(self.user_embeddings, self.user_list_placeholder) # Lookup the Users for the given batch
self.user_b = tf.Variable(tf.zeros([self.userid_size]), name='user_b', trainable=True)
self.user_bias_embed = tf.nn.embedding_lookup(self.user_b, self.user_list_placeholder)
# Product embedding
self.product_embeddings = tf.get_variable("product_embeddings", shape=[self.productid_size, self.embedding_size], initializer=tf.contrib.layers.xavier_initializer(), trainable=True)
self.product_embed = tf.nn.embedding_lookup(self.product_embeddings, self.product_list_placeholder) # Lookup the embeddings2 for the given batch
self.prod_b = tf.Variable(tf.zeros([self.productid_size]), name='prod_b', trainable=True)
self.prod_bias_embed = tf.nn.embedding_lookup(self.prod_b, self.product_list_placeholder)
with tf.variable_scope('logits'):
self.b = tf.get_variable('b', [1], initializer=tf.constant_initializer(0.0, dtype=tf.float32), trainable=True)
self.alpha = tf.get_variable('alpha', [], initializer=tf.constant_initializer(0.00000001, dtype=tf.float32), trainable=True)
#alpha * (<user_i, prod_j>
self.emb_logits = self.alpha * tf.reshape(tf.reduce_sum(tf.multiply(self.user_embed, self.product_embed), 1), [tf.shape(self.user_list_placeholder)[0], 1])
#prod_bias + user_bias + global_bias
self.logits = tf.reshape(tf.add(self.prod_bias_embed, self.user_bias_embed), [tf.shape(self.user_list_placeholder)[0], 1]) + self.b
self.logits = self.emb_logits + self.logits
self.prediction = tf.sigmoid(self.logits, name='sigmoid_prediction')
def create_losses(self):
"""Create the losses"""
with tf.name_scope('losses'):
#Sigmoid loss between the logits and labels
self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.label_list_placeholder))
#Adding the regularizer term on user vct and prod vct
self.loss = self.loss + self.l2_pen * tf.nn.l2_loss(self.user_embeddings) + self.l2_pen * tf.nn.l2_loss(self.product_embeddings) + self.l2_pen * tf.nn.l2_loss(self.prod_b) + self.l2_pen * tf.nn.l2_loss(self.user_b)
#Compute MSE loss
self.mse_loss = tf.losses.mean_squared_error(labels=self.label_list_placeholder, predictions=tf.sigmoid(self.logits))
#Compute Log loss
self.log_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.label_list_placeholder))
def add_optimizer(self):
"""Add the required optimiser to the graph"""
with tf.name_scope('optimizer'):
# Global step variable to keep track of the number of training steps
self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
self.apply_grads = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step)
def add_performance_metrics(self):
"""Add the required performance metrics to the graph"""
with tf.name_scope('performance_metrics'):
# Add performance metrics to the tensorflow graph
correct_predictions = tf.equal(tf.round(self.prediction), self.label_list_placeholder)
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
self.auc, self.auc_update_op = tf.metrics.auc(labels=self.label_list_placeholder, predictions=self.prediction, num_thresholds=1000, name="auc_metric")
def add_summaries(self):
"""Add the required summaries to the graph"""
with tf.name_scope('summaries'):
# Add loss to the summaries
tf.summary.scalar('total_loss', self.loss)
tf.summary.histogram('histogram_total_loss', self.loss)
# Add weights to the summaries
tf.summary.histogram('user_embedding_weights', self.user_embeddings)
tf.summary.histogram('product_embedding_weights', self.product_embeddings)
tf.summary.histogram('logits', self.logits)
tf.summary.histogram('prod_b', self.prod_b)
tf.summary.histogram('user_b', self.user_b)
tf.summary.histogram('global_bias', self.b)
tf.summary.scalar('alpha', self.alpha)
```
### CausalProd2Vec2i - inherits from SupervisedProd2vec
- Class to define the causal version of MF of the implicit feedback matrix (1/0/unk) of Users x Products
- When called it creates the TF graph for the associated NN:
**Step1: Changed: +regularizer placeholder** self.create_placeholders() => Creates the input placeholders
**Step2:** self.build_graph() => Creates the 3 layers:
- the user embedding layer
- the product embedding layer
- the output prediction layer
**New:**
self.create_control_embeddings()
self.create_counterfactual_regularizer()
**Step3: Changed: +add regularizer between embeddings** self.create_losses() => Defines the loss function for prediction
**Step4:** self.add_optimizer() => Defines the optimizer
**Step5:** self.add_performance_metrics() => Defines the logged performance metrics (accuracy, AUC)
**Step6:** self.add_summaries() => Adds the TensorBoard summaries (losses, weights, biases)
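The counterfactual regularizer itself is left open as exercise Q1 in the class below. Purely as a hedged illustration (one plausible form, penalizing the distance between each item's representation and its gradient-stopped 2i counterpart; the attribute names `product_embed`, `control_embed`, `cf`, `cf_reg` follow the class, and this is not necessarily the intended solution):
```
# Hypothetical sketch only - one way create_counterfactual_regularizer() could look.
# It penalizes the distance between the batch product embeddings and their
# gradient-stopped 2i counterparts. The exact form asked for in Q1 may differ.
def create_counterfactual_regularizer(self):
    with tf.name_scope('counterfactual_regularizer'):
        # Difference between each product vector and its counterpart representation
        diff = self.product_embed - self.control_embed
        if self.cf == 'l1':
            self.cf_reg = tf.reduce_sum(tf.abs(diff))
        else:  # 'l2'
            self.cf_reg = tf.nn.l2_loss(diff)
```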
```
class CausalProd2Vec2i(SupervisedProd2vec):
def __init__(self, userid_size, productid_size, embedding_size, l2_pen, learning_rate, cf_pen, cf='l1'):
self.userid_size = userid_size
self.productid_size = productid_size * 2 # Doubled to accommodate the treatment embeddings
self.embedding_size = embedding_size
self.l2_pen = l2_pen
self.learning_rate = learning_rate
self.cf_pen = cf_pen
self.cf = cf
# Build the graph
self.create_placeholders()
self.build_graph()
self.create_control_embeddings()
#self.create_counterfactual_regularizer()
self.create_losses()
self.add_optimizer()
self.add_performance_metrics()
self.add_summaries()
def create_placeholders(self):
"""Create the placeholders to be used """
self.user_list_placeholder = tf.placeholder(tf.int32, [None], name="user_list_placeholder")
self.product_list_placeholder = tf.placeholder(tf.int32, [None], name="product_list_placeholder")
self.label_list_placeholder = tf.placeholder(tf.float32, [None, 1], name="label_list_placeholder")
self.reg_list_placeholder = tf.placeholder(tf.int32, [None], name="reg_list_placeholder")
# logits placeholder used to store the test CR for the bootstrapping process
self.logits_placeholder = tf.placeholder(tf.float32, [None], name="logits_placeholder")
def create_control_embeddings(self):
"""Create the control embeddings"""
with tf.name_scope('control_embedding'):
# Get the control embedding at id 0
self.control_embed = tf.stop_gradient(tf.nn.embedding_lookup(self.product_embeddings, self.reg_list_placeholder))
#################################
## SOLUTION TO Q1 GOES HERE! ##
#################################
#def create_counterfactual_regularizer(self):
# self.cf_reg
def create_losses(self):
"""Create the losses"""
with tf.name_scope('losses'):
#Sigmoid loss between the logits and labels
self.log_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.label_list_placeholder))
#Adding the regularizer term on user vct and prod vct and their bias terms
reg_term = self.l2_pen * ( tf.nn.l2_loss(self.user_embeddings) + tf.nn.l2_loss(self.product_embeddings) )
reg_term_biases = self.l2_pen * ( tf.nn.l2_loss(self.prod_b) + tf.nn.l2_loss(self.user_b) )
self.loss = self.log_loss + reg_term + reg_term_biases
#Adding the counterfactual regualizer term
# Q1: Write the method that computes the counterfactual regularizer
#self.create_counterfactual_regularizer()
#self.loss = self.loss + (self.cf_pen * self.cf_reg)
#Compute addtionally the MSE loss
self.mse_loss = tf.losses.mean_squared_error(labels=self.label_list_placeholder, predictions=tf.sigmoid(self.logits))
```
### Create the TF Graph
```
# Create graph object
graph = tf.Graph()
with graph.as_default():
with tf.device('/cpu:0'):
# Load the required graph
### Number of products and users
productid_size = 1683
userid_size = 944
model = CausalProd2Vec2i(userid_size, productid_size+1, FLAGS.embedding_size, FLAGS.l2_pen, FLAGS.learning_rate, FLAGS.cf_pen, cf=FLAGS.cf_loss)
ap_mse_loss, ap_log_loss = create_average_predictor_tensors(model.label_list_placeholder, model.logits_placeholder)
# Define initializer to initialize/reset running variables
running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="performance_metrics/auc_metric")
running_vars_initializer = tf.variables_initializer(var_list=running_vars)
# Get train data batch from queue
next_batch = load_train_dataset(train_data_set_location, FLAGS.batch_size, FLAGS.num_epochs)
test_user_batch, test_product_batch, test_label_batch, test_cr = load_test_dataset(test_data_set_location)
val_test_user_batch, val_test_product_batch, val_test_label_batch, val_cr = load_test_dataset(validation_test_set_location)
val_train_user_batch, val_train_product_batch, val_train_label_batch, val_cr = load_test_dataset(validation_train_set_location)
# create the empirical CR test logits
test_logits = np.empty(len(test_label_batch))
test_logits.fill(test_cr)
```
### Launch the Session: Train the model
```
# Launch the Session
with tf.Session(graph=graph, config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
# initialise all the TF variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Setup tensorboard: tensorboard --logdir=/tmp/tensorboard
time_tb = str(time.ctime(int(time.time())))
train_writer = tf.summary.FileWriter('/tmp/tensorboard' + '/train' + time_tb, sess.graph)
test_writer = tf.summary.FileWriter('/tmp/tensorboard' + '/test' + time_tb, sess.graph)
merged = tf.summary.merge_all()
# Embeddings viz (Possible to add labels for embeddings later)
saver = tf.train.Saver()
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = model.product_embeddings.name
projector.visualize_embeddings(train_writer, config)
# Variables used in the training loop
t = time.time()
step = 0
average_loss = 0
average_mse_loss = 0
average_log_loss = 0
# Start the training loop---------------------------------------------------------------------------------------------
print("Starting Training On Causal Prod2Vec")
print(FLAGS.cf_loss)
print("Num Epochs = ", FLAGS.num_epochs)
print("Learning Rate = ", FLAGS.learning_rate)
print("L2 Reg = ", FLAGS.l2_pen)
print("CF Reg = ", FLAGS.cf_pen)
try:
while True:
# Run the TRAIN for this step batch ---------------------------------------------------------------------
# Construct the feed_dict
user_batch, product_batch, label_batch = sess.run(next_batch)
# Treatment is the small set of samples from St, Control is the larger set of samples from Sc
reg_ids = compute_2i_regularization_id(product_batch, productid_size) # Compute the product ID's for regularization
feed_dict = {model.user_list_placeholder : user_batch, model.product_list_placeholder: product_batch, model.reg_list_placeholder: reg_ids, model.label_list_placeholder: label_batch}
# Run the graph
_, sum_str, loss_val, mse_loss_val, log_loss_val = sess.run([model.apply_grads, merged, model.loss, model.mse_loss, model.log_loss], feed_dict=feed_dict)
step +=1
average_loss += loss_val
average_mse_loss += mse_loss_val
average_log_loss += log_loss_val
# Every num_steps print average loss
if step % FLAGS.num_steps == 0:
if step > FLAGS.num_steps:
# The average loss is an estimate of the loss over the last set batches.
average_loss /= FLAGS.num_steps
average_mse_loss /= FLAGS.num_steps
average_log_loss /= FLAGS.num_steps
print("Average Training Loss on S_c (FULL, MSE, NLL) at step ", step, ": ", average_loss, ": ", average_mse_loss, ": ", average_log_loss, "Time taken (S) = " + str(round(time.time() - t, 1)))
average_loss = 0
average_mse_loss = 0
average_log_loss = 0
t = time.time() # reset the time
train_writer.add_summary(sum_str, step) # Write the summary
# Run the VALIDATION for this step batch ---------------------------------------------------------------------
val_train_product_batch = np.asarray(val_train_product_batch, dtype=np.float32)
val_test_product_batch = np.asarray(val_test_product_batch, dtype=np.float32)
val_train_reg_ids = compute_2i_regularization_id(val_train_product_batch, productid_size) # Compute the product ID's for regularization
val_test_reg_ids = compute_2i_regularization_id(val_test_product_batch, productid_size) # Compute the product ID's for regularization
feed_dict_test = {model.user_list_placeholder : val_test_user_batch, model.product_list_placeholder: val_test_product_batch, model.reg_list_placeholder: val_test_reg_ids, model.label_list_placeholder: val_test_label_batch}
feed_dict_train = {model.user_list_placeholder : val_train_user_batch, model.product_list_placeholder: val_train_product_batch, model.reg_list_placeholder: val_train_reg_ids, model.label_list_placeholder: val_train_label_batch}
sum_str, loss_val, mse_loss_val, log_loss_val = sess.run([merged, model.loss, model.mse_loss, model.log_loss], feed_dict=feed_dict_train)
print("Validation loss on S_c (FULL, MSE, NLL) at step ", step, ": ", loss_val, ": ", mse_loss_val, ": ", log_loss_val)
sum_str, loss_val, mse_loss_val, log_loss_val = sess.run([merged, model.loss, model.mse_loss, model.log_loss], feed_dict=feed_dict_test)
cost_val.append(loss_val)
print("Validation loss on S_t(FULL, MSE, NLL) at step ", step, ": ", loss_val, ": ", mse_loss_val, ": ", log_loss_val)
print("####################################################################################################################")
test_writer.add_summary(sum_str, step) # Write the summary
except tf.errors.OutOfRangeError:
print("Reached the number of epochs")
finally:
saver.save(sess, os.path.join('/tmp/tensorboard', model_name), model.global_step) # Save model
train_writer.close()
print("Training Complete")
# Run the bootstrap for this model ---------------------------------------------------------------------------------------------------------------
print("Begin Bootstrap process...")
print("Running BootStrap On The Control Representations")
compute_bootstraps_2i(sess, model, test_user_batch, test_product_batch, test_label_batch, test_logits, running_vars_initializer, ap_mse_loss, ap_log_loss)
print("Running BootStrap On The Treatment Representations")
test_product_batch = [int(x) + productid_size for x in test_product_batch]
compute_bootstraps_2i(sess, model, test_user_batch, test_product_batch, test_label_batch, test_logits, running_vars_initializer, ap_mse_loss, ap_log_loss)
```
# Ray RLlib - Introduction to Reinforcement Learning
© 2019-2021, Anyscale. All Rights Reserved

_Reinforcement Learning_ is the category of machine learning that focuses on training one or more _agents_ to achieve maximal _rewards_ while operating in an environment. This lesson discusses the core concepts of RL, while subsequent lessons explore RLlib in depth. We'll use two examples with exercises to give you a taste of RL. If you already understand RL concepts, you can either skim this lesson or skip to the [next lesson](02-Introduction-to-RLlib.ipynb).
## What Is Reinforcement Learning?
Let's explore the basic concepts of RL, specifically the _Markov Decision Process_ abstraction, and show its use in Python.
Consider the following image:

In RL, one or more **agents** interact with an **environment** to maximize a **reward**. The agents make **observations** about the **state** of the environment and take **actions** that they believe will maximize the long-term reward. However, at any particular moment, the agents can only observe the immediate reward. So, the training process usually involves lots and lots of replay of the game, the robot simulator traversing a virtual space, etc., so the agents can learn from repeated trials what decisions/actions work best to maximize the long-term, cumulative reward.
Trial-and-error search and delayed reward are the distinguishing characteristics of RL vs. other ML methods ([Sutton 2018](06-RL-References.ipynb#Books)).
The way to formalize trial and error is the **exploitation vs. exploration tradeoff**. When an agent finds what appears to be a "rewarding" sequence of actions, the agent may naturally want to continue to **exploit** these actions. However, even better actions may exist. An agent won't know whether alternatives are better or not unless some percentage of actions taken **explore** the alternatives. So, all RL algorithms include a strategy for exploitation and exploration.
## RL Applications
RL has many potential applications. RL became "famous" due to these successes, including achieving expert game play, training robots, autonomous vehicles, and other simulated agents:






Credits:
* [AlphaGo](https://www.youtube.com/watch?v=l7ngy56GY6k)
* [Breakout](https://towardsdatascience.com/tutorial-double-deep-q-learning-with-dueling-network-architectures-4c1b3fb7f756) ([paper](https://arxiv.org/abs/1312.5602))
* [Stacking Legos with Sawyer](https://robohub.org/soft-actor-critic-deep-reinforcement-learning-with-real-world-robots/)
* [Walking Man](https://openai.com/blog/openai-baselines-ppo/)
* [Autonomous Vehicle](https://www.daimler.com/innovation/case/autonomous/intelligent-drive-2.html)
* ["Cassie": Two-legged Robot](https://mime.oregonstate.edu/research/drl/robots/cassie/) (Uses Ray!)
Recently, other industry applications have emerged, including the following:
* **Process optimization:** industrial processes (factories, pipelines) and other business processes, routing problems, cluster optimization.
* **Ad serving and recommendations:** Some of the traditional methods, including _collaborative filtering_, are hard to scale for very large data sets. RL systems are being developed to do an effective job more efficiently than traditional methods.
* **Finance:** Markets are time-oriented _environments_ where automated trading systems are the _agents_.
## Markov Decision Processes
At its core, Reinforcement learning builds on the concepts of [Markov Decision Process (MDP)](https://en.wikipedia.org/wiki/Markov_decision_process), where the current state, the possible actions that can be taken, and overall goal are the building blocks.
An MDP models sequential interactions with an external environment. It consists of the following:
- a **state space** where the current state of the system is sometimes called the **context**.
- a set of **actions** that can be taken at a particular state $s$ (or sometimes the same set for all states).
- a **transition function** that describes the probability of being in a state $s'$ at time $t+1$ given that the MDP was in state $s$ at time $t$ and action $a$ was taken. The next state is selected stochastically based on these probabilities.
- a **reward function**, which determines the reward received at time $t$ following action $a$, based on the decision of **policy** $\pi$.
The goal of MDP is to develop a **policy** $\pi$ that specifies what action $a$ should be chosen for a given state $s$ so that the cumulative reward is maximized. When it is possible for the policy "trainer" to fully observe all the possible states, actions, and rewards, it can define a deterministic policy, fixing a single action choice for each state. In this scenario, the transition probabilities reduce to the probability of transitioning to state $s'$ given the current state is $s$, independent of actions, because the state now leads to a deterministic action choice. Various algorithms can be used to compute this policy.
Put another way, if the policy isn't deterministic, then the transition probability to state $s'$ at a time $t+1$ when action $a$ is taken for state $s$ at time $t$, is given by:
\begin{equation}
P_a(s',s) = P(s_{t+1} = s'|s_t=s,a)
\end{equation}
When the policy is deterministic, this transition probability reduces to the following, independent of $a$:
\begin{equation}
P(s',s) = P(s_{t+1} = s'|s_t=s)
\end{equation}
To be clear, a deterministic policy means that one and only one action will always be selected for a given state $s$, but the next state $s'$ will still be selected stochastically.
In the general case of RL, it isn't possible to fully know all this information, some of which might be hidden and evolving, so it isn't possible to specify a fully-deterministic policy.
Often this cumulative reward is computed using the **discounted sum** over all rewards observed:
\begin{equation}
\arg\max_{\pi} \sum_{t=1}^T \gamma^t R_t(\pi),
\end{equation}
where $T$ is the number of steps taken in the MDP (this is a random variable and may depend on $\pi$), $R_t$ is the reward received at time $t$ (also a random variable which depends on $\pi$), and $\gamma$ is the **discount factor**. The value of $\gamma$ is between 0 and 1, so rewards received further in the future are "discounted" relative to more immediate rewards.
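As a tiny, self-contained illustration of this formula (not part of the lesson code), the discounted sum can be computed directly in Python for a list of observed rewards:
```
# Discounted cumulative reward for a sequence of observed rewards,
# following the formula above with t starting at 1.
def discounted_return(rewards, gamma=0.99):
    return sum(gamma**t * r for t, r in enumerate(rewards, start=1))

print(discounted_return([1.0, 1.0, 1.0], gamma=0.9))  # 0.9 + 0.81 + 0.729 = 2.439
```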
The [Wikipedia page on MDP](https://en.wikipedia.org/wiki/Markov_decision_process) provides more details. Note what we said in the third bullet, that the new state only depends on the previous state and the action taken. The assumption is that we can simplify our effort by ignoring all the previous states except the last one and still achieve good results. This is known as the [Markov property](https://en.wikipedia.org/wiki/Markov_property). This assumption often works well and it greatly reduces the resources required.
## The Elements of RL
Here are the elements of RL that expand on MDP concepts (see [Sutton 2018](https://mitpress.mit.edu/books/reinforcement-learning-second-edition) for more details):
#### Policies
Unlike MDP, the **transition function** probabilities are often not known in advance, but must be learned. Learning is done through repeated "play", where the agent interacts with the environment.
This makes the **policy** $\pi$ harder to determine. Because the full state space usually can't be known, the choice of action $a$ for a given state $s$ almost always remains stochastic, never deterministic, unlike in an MDP.
#### Reward Signal
The idea of a **reward signal** encapsulates the desired goal for the system and provides feedback for updating the policy based on how well particular events or actions contribute rewards towards the goal.
#### Value Function
The **value function** encapsulates the maximum cumulative reward likely to be achieved starting from a given state for an **episode**. This is harder to determine than the simple reward returned after taking an action. In fact, much of the research in RL over the decades has focused on finding better and more efficient implementations of value functions. To illustrate the challenge, repeatedly taking one sequence of actions may yield low rewards for a while, but eventually provide large rewards. Conversely, always choosing a different sequence of actions may yield a good reward at each step, but be suboptimal for the cumulative reward.
#### Episode
A sequence of steps by the agent starting in an initial state. At each step, the agent observes the current state, chooses the next action, and receives the new reward. Episodes are used for both training policies and replaying with an existing policy (called _rollout_).
#### Model
An optional feature, some RL algorithms develop or use a **model** of the environment to anticipate the resulting states and rewards for future actions. Hence, they are useful for _planning_ scenarios. Methods for solving RL problems that use models are called _model-based methods_, while methods that learn by trial and error are called _model-free methods_.
## Reinforcement Learning Example
To finish this introduction, let's learn about the popular "hello world" (1) example environment for RL, balancing a pole vertically on a moving cart, called `CartPole`. Then we'll see how to use RLlib to train a policy using a popular RL algorithm, _Proximal Policy Optimization_, again using `CartPole`.
(1) In books and tutorials on programming languages, it is a tradition that the very first program shown prints the message "Hello World!".
### CartPole and OpenAI
The popular [OpenAI "gym" environment](https://gym.openai.com/) provides MDP interfaces to a variety of simulated environments. Perhaps the most popular for learning RL is `CartPole`, a simple environment that simulates the physics of balancing a pole on a moving cart. The `CartPole` problem is described at https://gym.openai.com/envs/CartPole-v1. Here is an image from that website, where the pole is currently falling to the right, which means the cart will need to move to the right to restore balance:

This example fits into the MDP framework as follows:
- The **state** consists of the position and velocity of the cart (moving in one dimension from left to right) as well as the angle and angular velocity of the pole that is balancing on the cart.
- The **actions** are to decrease or increase the cart's velocity by one unit. A negative velocity means it is moving to the left.
- The **transition function** is deterministic and is determined by simulating physical laws. Specifically, for a given **state**, what should we choose as the next velocity value? In the RL context, the correct velocity value to choose has to be learned. Hence, we learn a _policy_ that approximates the optimal transition function that could be calculated from the laws of physics.
- The **reward function** is a constant 1 as long as the pole is upright, and 0 once the pole has fallen over. Therefore, maximizing the reward means balancing the pole for as long as possible.
- The **discount factor** in this case can be taken to be 1, meaning we treat the rewards at all time steps equally and don't discount any of them.
More information about the `gym` Python module is available at https://gym.openai.com/. The list of all the available Gym environments is in [this wiki page](https://github.com/openai/gym/wiki/Table-of-environments). We'll use a few more of them and even create our own in subsequent lessons.
```
import gym
import numpy as np
import pandas as pd
import json
```
The code below illustrates how to create and manipulate MDPs in Python. An MDP can be created by calling `gym.make`. Gym environments are identified by names like `CartPole-v1`. A **catalog of built-in environments** can be found at https://gym.openai.com/envs.
```
env = gym.make("CartPole-v1")
print("Created env:", env)
```
Reset the state of the MDP by calling `env.reset()`. This call returns the initial state of the MDP.
```
state = env.reset()
print("The starting state is:", state)
```
Recall that the state is the position of the cart, its velocity, the angle of the pole, and the angular velocity of the pole.
The `env.step` method takes an action. In the case of the `CartPole` environment, the appropriate actions are 0 or 1, for pushing the cart to the left or right, respectively. `env.step()` returns a tuple of four things:
1. the new state of the environment
2. a reward
3. a boolean indicating whether the simulation has finished
4. a dictionary of miscellaneous extra information
Let's show what happens if we take one step with an action of 0.
```
action = 0
state, reward, done, info = env.step(action)
print(state, reward, done, info)
```
A **rollout** is a simulation of a policy in an environment. It is used both during training and when running simulations with a trained policy.
The code below performs a rollout in a given environment. It takes **random actions** until the simulation has finished and returns the cumulative reward.
```
def random_rollout(env):
state = env.reset()
done = False
cumulative_reward = 0
# Keep looping as long as the simulation has not finished.
while not done:
# Choose a random action (either 0 or 1).
action = np.random.choice([0, 1])
# Take the action in the environment.
state, reward, done, _ = env.step(action)
# Update the cumulative reward.
cumulative_reward += reward
# Return the cumulative reward.
return cumulative_reward
```
Try rerunning the following cell a few times. How much do the answers change? Note that the maximum possible reward for `CartPole-v1` is 500. You'll probably get numbers well under 500.
```
reward = random_rollout(env)
print(reward)
reward = random_rollout(env)
print(reward)
```
### Exercise 1
Choosing actions at random in `random_rollout` is not a very effective policy, as the previous results showed. Finish implementing the `rollout_policy` function below, which takes an environment *and* a policy. Recall that the *policy* is a function that takes in a *state* and returns an *action*. The main difference is that instead of choosing a **random action**, like we just did (with poor results), the action should be chosen **with the policy** (as a function of the state).
> **Note:** Exercise solutions for this tutorial can be found [here](solutions/Ray-RLlib-Solutions.ipynb).
```
def rollout_policy(env, policy):
state = env.reset()
done = False
cumulative_reward = 0
# EXERCISE: Fill out this function by copying the appropriate part of 'random_rollout'
# and modifying it to choose the action using the policy.
raise NotImplementedError
# Return the cumulative reward.
return cumulative_reward
def sample_policy1(state):
return 0 if state[0] < 0 else 1
def sample_policy2(state):
return 1 if state[0] < 0 else 0
reward1 = np.mean([rollout_policy(env, sample_policy1) for _ in range(100)])
reward2 = np.mean([rollout_policy(env, sample_policy2) for _ in range(100)])
print('The first sample policy got an average reward of {}.'.format(reward1))
print('The second sample policy got an average reward of {}.'.format(reward2))
assert 5 < reward1 < 15, ('Make sure that rollout_policy computes the action '
'by applying the policy to the state.')
assert 25 < reward2 < 35, ('Make sure that rollout_policy computes the action '
'by applying the policy to the state.')
```
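If you get stuck, the official answer is in the linked solutions notebook. As a hint, one possible completion of the loop looks like the sketch below; the only change from `random_rollout` is how the action is chosen.
```
# One possible completion (a sketch; see the solutions notebook for the official answer).
def rollout_policy_sketch(env, policy):
    state = env.reset()
    done = False
    cumulative_reward = 0
    while not done:
        action = policy(state)                     # choose the action with the policy
        state, reward, done, _ = env.step(action)
        cumulative_reward += reward
    return cumulative_reward
```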
We'll return to `CartPole` in lesson [01: Application Cart Pole](explore-rllib/01-Application-Cart-Pole.ipynb) in the `explore-rllib` section.
### RLlib Reinforcement Learning Example: Cart Pole with Proximal Policy Optimization
This section demonstrates how to use the _proximal policy optimization_ (PPO) algorithm implemented by [RLlib](http://rllib.io). PPO is a popular way to develop a policy. RLlib also uses [Ray Tune](http://tune.io), the Ray Hyperparameter Tuning framework, which is covered in the [Ray Tune Tutorial](../ray-tune/00-Ray-Tune-Overview.ipynb).
We'll provide relatively little explanation of **RLlib** concepts for now, but explore them in greater depth in subsequent lessons. For more on RLlib, see the documentation at http://rllib.io.
PPO is described in detail in [this paper](https://arxiv.org/abs/1707.06347). It is a variant of _Trust Region Policy Optimization_ (TRPO) described in [this earlier paper](https://arxiv.org/abs/1502.05477). [This OpenAI post](https://openai.com/blog/openai-baselines-ppo/) provides a more accessible introduction to PPO.
PPO works in two phases. In the first phase, a large number of rollouts are performed in parallel. The rollouts are then aggregated on the driver and a surrogate optimization objective is defined based on those rollouts. In the second phase, we use SGD (_stochastic gradient descent_) to find the policy that maximizes that objective with a penalty term for diverging too much from the current policy.

> **NOTE:** The SGD optimization step is best performed in a data-parallel manner over multiple GPUs. This is exposed through the `num_gpus` field of the `config` dictionary. Hence, for normal usage, one or more GPUs is recommended.
(The original version of this example can be found [here](https://raw.githubusercontent.com/ucbrise/risecamp/risecamp2018/ray/tutorial/rllib_exercises/)).
```
import ray
from ray.rllib.agents.ppo import PPOTrainer, DEFAULT_CONFIG
from ray.tune.logger import pretty_print
```
Initialize Ray. If you are running these tutorials on your laptop, then a single-node Ray cluster will be started by the next cell. If you are running in the Anyscale platform, it will connect to the running Ray cluster.
```
info = ray.init(ignore_reinit_error=True, log_to_driver=False)
print(info)
```
> **Tip:** Having trouble starting Ray? See the [Troubleshooting](../reference/Troubleshooting-Tips-Tricks.ipynb) tips.
The next cell prints the URL for the Ray Dashboard. **This is only correct if you are running this tutorial on a laptop.** Click the link to open the dashboard.
If you are running on the Anyscale platform, use the URL provided by your instructor to open the Dashboard.
```
print("Dashboard URL: http://{}".format(info["webui_url"]))
```
Instantiate a PPOTrainer object. We pass in a config object that specifies how the network and training procedure should be configured. Some of the parameters are the following.
- `num_workers` is the number of actors that the agent will create. This determines the degree of parallelism that will be used. In a cluster, these actors will be spread over the available nodes.
- `num_sgd_iter` is the number of epochs of SGD (stochastic gradient descent, i.e., passes through the data) that will be used to optimize the PPO surrogate objective at each iteration of PPO, for each _minibatch_ ("chunk") of training data. Using minibatches is more efficient than training with one record at a time.
- `sgd_minibatch_size` is the SGD minibatch size (batches of data) that will be used to optimize the PPO surrogate objective.
- `model` contains a dictionary of parameters describing the neural net used to parameterize the policy. The `fcnet_hiddens` parameter is a list of the sizes of the hidden layers. Here, we have two hidden layers of size 100, each.
- `num_cpus_per_worker` when set to 0 prevents Ray from pinning a CPU core to each worker, which helps avoid running out of CPU cores for workers in a constrained environment like a laptop or a cloud VM.
```
config = DEFAULT_CONFIG.copy()
config['num_workers'] = 1
config['num_sgd_iter'] = 30
config['sgd_minibatch_size'] = 128
config['model']['fcnet_hiddens'] = [100, 100]
config['num_cpus_per_worker'] = 0
agent = PPOTrainer(config, 'CartPole-v1')
```
Now let's train the policy on the `CartPole-v1` environment for `N` steps. The JSON object returned by each call to `agent.train()` contains a lot of information we'll inspect below. For now, we'll extract information we'll graph, such as `episode_reward_mean`. The _mean_ values are more useful for determining successful training.
```
N = 10
results = []
episode_data = []
episode_json = []
for n in range(N):
result = agent.train()
results.append(result)
episode = {'n': n,
'episode_reward_min': result['episode_reward_min'],
'episode_reward_mean': result['episode_reward_mean'],
'episode_reward_max': result['episode_reward_max'],
'episode_len_mean': result['episode_len_mean']}
episode_data.append(episode)
episode_json.append(json.dumps(episode))
print(f'{n:3d}: Min/Mean/Max reward: {result["episode_reward_min"]:8.4f}/{result["episode_reward_mean"]:8.4f}/{result["episode_reward_max"]:8.4f}')
```
Now let's convert the episode data to a Pandas `DataFrame` for easy manipulation. The results indicate how much reward the policy is receiving (`episode_reward_*`) and how many time steps of the environment the policy ran (`episode_len_mean`). The maximum possible reward for this problem is `500`. The reward mean and trajectory length are very close because the agent receives a reward of one for every time step that it survives. However, this is specific to this environment and not true in general.
```
df = pd.DataFrame(data=episode_data)
df
df.columns.tolist()
```
Let's plot the data. Since the length and reward means are equal, we'll only plot one line:
```
df.plot(x="n", y=["episode_reward_mean", "episode_reward_min", "episode_reward_max"], secondary_y=True)
```
The model is quickly able to hit the maximum value of 500, but the mean is what's most valuable. After 10 steps, we're more than half way there.
For reference, here are two views of the full result object for one training iteration. First, a "pretty print" output.
> **Tip:** The output will be long. When this happens for a cell, right click and select _Enable scrolling for outputs_.
```
print(pretty_print(results[-1]))
```
We'll learn about more of these values as we continue the tutorial.
The whole, long JSON blob, which includes the historical stats about episode rewards and lengths:
```
results[-1]
```
Let's plot the `episode_reward` values:
```
episode_rewards = results[-1]['hist_stats']['episode_reward']
df_episode_rewards = pd.DataFrame(data={'episode':range(len(episode_rewards)), 'reward':episode_rewards})
df_episode_rewards.plot(x="episode", y="reward")
```
For a well-trained model, most runs do very well while occasional runs do poorly. Try plotting other results episodes by changing the array index in `results[-1]` to another number between `0` and `9`. (The length of `results` is `10`.)
### Exercise 2
The current network and training configuration are too large and heavy-duty for a simple problem like `CartPole`. Modify the configuration to use a smaller network (the `config['model']['fcnet_hiddens']` setting) and to speed up the optimization of the surrogate objective. (Fewer SGD iterations and a larger batch size should help.)
```
# Make edits here:
config = DEFAULT_CONFIG.copy()
config['num_workers'] = 3
config['num_sgd_iter'] = 30
config['sgd_minibatch_size'] = 128
config['model']['fcnet_hiddens'] = [100, 100]
config['num_cpus_per_worker'] = 0
agent = PPOTrainer(config, 'CartPole-v1')
```
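For example, one possible smaller configuration along those lines is sketched below. The exact values are not prescribed by the exercise, so treat them as a starting point and experiment.
```
# Illustrative values only -- a smaller network with fewer SGD passes and larger minibatches.
config = DEFAULT_CONFIG.copy()
config['num_workers'] = 3
config['num_sgd_iter'] = 10                   # fewer SGD passes per iteration
config['sgd_minibatch_size'] = 256            # larger minibatches
config['model']['fcnet_hiddens'] = [32, 32]   # smaller hidden layers
config['num_cpus_per_worker'] = 0
agent = PPOTrainer(config, 'CartPole-v1')
```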
Train the agent and try to get a reward of 500. If it's training too slowly you may need to modify the config above to use fewer hidden units, a larger `sgd_minibatch_size`, a smaller `num_sgd_iter`, or a larger `num_workers`.
This should take around `N` = 20 or 30 training iterations.
```
N = 5
results = []
episode_data = []
episode_json = []
for n in range(N):
result = agent.train()
results.append(result)
episode = {'n': n,
'episode_reward_mean': result['episode_reward_mean'],
'episode_reward_max': result['episode_reward_max'],
'episode_len_mean': result['episode_len_mean']}
episode_data.append(episode)
episode_json.append(json.dumps(episode))
print(f'Max reward: {episode["episode_reward_max"]}')
```
# Using Checkpoints
You checkpoint the current state of a trainer to save what it has learned. Checkpoints are used for subsequent _rollouts_ and also to continue training later from a known-good state. Calling `agent.save()` creates the checkpoint and returns the path to the checkpoint file, which can be used later to restore the current state to a new trainer. Here we'll load the trained policy into the same process, but often it would be loaded in a new process, for example on a production cluster for serving that is separate from the training cluster.
```
checkpoint_path = agent.save()
print(checkpoint_path)
```
Now load the checkpoint in a new trainer:
```
trained_config = config.copy()
test_agent = PPOTrainer(trained_config, "CartPole-v1")
test_agent.restore(checkpoint_path)
```
Use the previously-trained policy to act in an environment. The key line is the call to `test_agent.compute_action(state)` which uses the trained policy to choose an action. This is an example of _rollout_, which we'll study in a subsequent lesson.
Verify that the cumulative reward received roughly matches up with the reward printed above. It will be at or near 500.
```
env = gym.make("CartPole-v1")
state = env.reset()
done = False
cumulative_reward = 0
while not done:
action = test_agent.compute_action(state) # key line; get the next action
state, reward, done, _ = env.step(action)
cumulative_reward += reward
print(cumulative_reward)
ray.shutdown()
```
The next lesson, [02: Introduction to RLlib](02-Introduction-to-RLlib.ipynb), steps back to introduce RLlib, its goals, and the capabilities it provides.
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_2_kfold.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 5: Regularization and Dropout**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 5 Material
* Part 5.1: Introduction to Regularization: Ridge and Lasso [[Video]](https://www.youtube.com/watch?v=jfgRtCYjoBs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_1_reg_ridge_lasso.ipynb)
* **Part 5.2: Using K-Fold Cross Validation with Keras** [[Video]](https://www.youtube.com/watch?v=maiQf8ray_s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_2_kfold.ipynb)
* Part 5.3: Using L1 and L2 Regularization with Keras to Decrease Overfitting [[Video]](https://www.youtube.com/watch?v=JEWzWv1fBFQ&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_3_keras_l1_l2.ipynb)
* Part 5.4: Drop Out for Keras to Decrease Overfitting [[Video]](https://www.youtube.com/watch?v=bRyOi0L6Rs8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_4_dropout.ipynb)
* Part 5.5: Benchmarking Keras Deep Learning Regularization Techniques [[Video]](https://www.youtube.com/watch?v=1NLBwPumUAs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_05_5_bootstrap.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
```
# Part 5.2: Using K-Fold Cross-validation with Keras
Cross-validation can be used for a variety of purposes in predictive modeling. These include:
* Generating out-of-sample predictions from a neural network
* Estimate a good number of epochs to train a neural network for (early stopping)
* Evaluate the effectiveness of certain hyperparameters, such as activation functions, neuron counts, and layer counts
Cross-validation uses a number of folds, and multiple models, to provide each segment of data a chance to serve as both the validation and training set. Cross validation is shown in Figure 5.CROSS.
**Figure 5.CROSS: K-Fold Crossvalidation**

It is important to note that there will be one model (neural network) for each fold. To generate predictions for new data, which is data not present in the training set, predictions from the fold models can be handled in several ways:
* Choose the model that had the highest validation score as the final model.
* Present new data to the 5 models (one for each fold) and average the results (this is an [ensemble](https://en.wikipedia.org/wiki/Ensemble_learning)); a sketch of this option appears below.
* Retrain a new model (using the same settings as the cross-validation) on the entire dataset, training for the same number of epochs and with the same hidden layer structure.
Generally, I prefer the last approach and will retrain a model on the entire data set once I have selected hyper-parameters. Of course, I will always set aside a final holdout set for model validation that I do not use in any aspect of the training process.
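As a minimal sketch of the ensemble option above (assuming `fold_models` is a list holding the model trained in each fold and `x_new` is a feature matrix of new records; neither variable is created by the code in this notebook):
```
import numpy as np

# Average the predictions of the per-fold models (illustrative sketch).
def ensemble_predict(fold_models, x_new):
    preds = [model.predict(x_new) for model in fold_models]
    return np.mean(preds, axis=0)
```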
### Regression vs Classification K-Fold Cross-Validation
Regression and classification are handled somewhat differently with regards to cross-validation. Regression is the simpler case where you can simply break up the data set into K folds with little regard for where each item lands. For regression it is best that the data items fall into the folds as randomly as possible. It is also important to remember that not every fold will necessarily have exactly the same number of data items. It is not always possible for the data set to be evenly divided into K folds. For regression cross-validation we will use the Scikit-Learn class **KFold**.
Cross validation for classification could also use the **KFold** object; however, this technique would not ensure that the class balance remains the same in each fold as it was in the original. It is very important that the balance of classes a model encounters in use remains the same as (or similar to) the balance it was trained on. A drift in this distribution is one of the most important things to monitor after a trained model has been placed into actual use. Because of this, we want to make sure that the cross-validation itself does not introduce an unintended shift. This is referred to as stratified sampling and is accomplished by using the Scikit-Learn object **StratifiedKFold** in place of **KFold** whenever you are using classification. In summary, the following two objects in Scikit-Learn should be used:
* **KFold** When dealing with a regression problem.
* **StratifiedKFold** When dealing with a classification problem.
The following two sections demonstrate cross-validation with classification and regression.
### Out-of-Sample Regression Predictions with K-Fold Cross-Validation
The following code trains the simple dataset using a 5-fold cross-validation. The expected performance of a neural network, of the type trained here, would be the score for the generated out-of-sample predictions. We begin by preparing a feature vector using the jh-simple-dataset to predict age. This is a regression problem.
```
import pandas as pd
from scipy.stats import zscore
from sklearn.model_selection import train_test_split
# Read the data set
df = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv",
na_values=['NA','?'])
# Generate dummies for job
df = pd.concat([df,pd.get_dummies(df['job'],prefix="job")],axis=1)
df.drop('job', axis=1, inplace=True)
# Generate dummies for area
df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1)
df.drop('area', axis=1, inplace=True)
# Generate dummies for product
df = pd.concat([df,pd.get_dummies(df['product'],prefix="product")],axis=1)
df.drop('product', axis=1, inplace=True)
# Missing values for income
med = df['income'].median()
df['income'] = df['income'].fillna(med)
# Standardize ranges
df['income'] = zscore(df['income'])
df['aspect'] = zscore(df['aspect'])
df['save_rate'] = zscore(df['save_rate'])
df['subscriptions'] = zscore(df['subscriptions'])
# Convert to numpy - Classification
x_columns = df.columns.drop('age').drop('id')
x = df[x_columns].values
y = df['age'].values
```
Now that the feature vector is created, a 5-fold cross-validation can be performed to generate out-of-sample predictions. We will assume 500 epochs, and not use early stopping. Later we will see how we can estimate a more optimal epoch count.
```
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
# Cross-Validate
kf = KFold(5, shuffle=True, random_state=42) # Use KFold for regression problems
oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x):
fold+=1
print(f"Fold #{fold}")
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
model = Sequential()
model.add(Dense(20, input_dim=x.shape[1], activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train,y_train,validation_data=(x_test,y_test),verbose=0,
epochs=500)
pred = model.predict(x_test)
oos_y.append(y_test)
oos_pred.append(pred)
# Measure this fold's RMSE
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print(f"Fold score (RMSE): {score}")
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print(f"Final, out of sample score (RMSE): {score}")
# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
#oosDF.to_csv(filename_write,index=False)
```
The code above reports the RMSE for each fold and the final out-of-sample RMSE. A common technique is to estimate the number of epochs each fold needs (for example with early stopping, sketched below) and then train a final model on the entire dataset for that average number of epochs.
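Here is a minimal sketch of how the per-fold epoch count could be recorded with Keras early stopping. It assumes the same `model`, `x_train`, `y_train`, `x_test`, and `y_test` variables as in the loop above and is not part of the code in this notebook.
```
from tensorflow.keras.callbacks import EarlyStopping

# Fit one fold with early stopping and return the epoch at which training stopped.
def fit_with_early_stopping(model, x_train, y_train, x_test, y_test):
    monitor = EarlyStopping(monitor='val_loss', patience=5,
                            restore_best_weights=True)
    model.fit(x_train, y_train, validation_data=(x_test, y_test),
              callbacks=[monitor], verbose=0, epochs=1000)
    # stopped_epoch is 0 if training ran to the epoch limit without stopping early
    return monitor.stopped_epoch
```
Averaging the returned values across folds gives a reasonable epoch count for a final model trained on the full dataset.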
### Classification with Stratified K-Fold Cross-Validation
The following code trains and fits the jh-simple-dataset dataset with cross-validation to generate out-of-sample predictions. It also writes out the out-of-sample results (predictions on the test set).
It is good to perform a stratified k-fold cross validation with classification data. This ensures that the percentages of each class remains the same across all folds. To do this, make use of the **StratifiedKFold** object, instead of the **KFold** object used in regression.
```
import pandas as pd
from scipy.stats import zscore
# Read the data set
df = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv",
na_values=['NA','?'])
# Generate dummies for job
df = pd.concat([df,pd.get_dummies(df['job'],prefix="job")],axis=1)
df.drop('job', axis=1, inplace=True)
# Generate dummies for area
df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1)
df.drop('area', axis=1, inplace=True)
# Missing values for income
med = df['income'].median()
df['income'] = df['income'].fillna(med)
# Standardize ranges
df['income'] = zscore(df['income'])
df['aspect'] = zscore(df['aspect'])
df['save_rate'] = zscore(df['save_rate'])
df['age'] = zscore(df['age'])
df['subscriptions'] = zscore(df['subscriptions'])
# Convert to numpy - Classification
x_columns = df.columns.drop('product').drop('id')
x = df[x_columns].values
dummies = pd.get_dummies(df['product']) # Classification
products = dummies.columns
y = dummies.values
```
We will assume 500 epochs, and not use early stopping. Later we will see how we can estimate a more optimal epoch count.
```
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
# np.argmax(pred,axis=1)
# Cross-validate
# Use for StratifiedKFold classification
kf = StratifiedKFold(5, shuffle=True, random_state=42)
oos_y = []
oos_pred = []
fold = 0
# Must specify the class labels (y) for StratifiedKFold
for train, test in kf.split(x,df['product']):
fold+=1
print(f"Fold #{fold}")
x_train = x[train]
y_train = y[train]
x_test = x[test]
y_test = y[test]
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu')) # Hidden 1
model.add(Dense(25, activation='relu')) # Hidden 2
model.add(Dense(y.shape[1],activation='softmax')) # Output
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(x_train,y_train,validation_data=(x_test,y_test),verbose=0,epochs=500)
pred = model.predict(x_test)
oos_y.append(y_test)
# raw probabilities to chosen class (highest probability)
pred = np.argmax(pred,axis=1)
oos_pred.append(pred)
# Measure this fold's accuracy
y_compare = np.argmax(y_test,axis=1) # For accuracy calculation
score = metrics.accuracy_score(y_compare, pred)
print(f"Fold score (accuracy): {score}")
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
oos_y_compare = np.argmax(oos_y,axis=1) # For accuracy calculation
score = metrics.accuracy_score(oos_y_compare, oos_pred)
print(f"Final score (accuracy): {score}")
# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat( [df, oos_y, oos_pred],axis=1 )
#oosDF.to_csv(filename_write,index=False)
```
### Training with both a Cross-Validation and a Holdout Set
If you have a considerable amount of data, it is always valuable to set aside a holdout set before you cross-validate. This holdout set will be the final evaluation before you put your model to real-world use. Figure 5.HOLDOUT shows this division.
**Figure 5.HOLDOUT: Cross Validation and a Holdout Set**

The following program makes use of a holdout set, and then still cross-validates.
```
import pandas as pd
from scipy.stats import zscore
from sklearn.model_selection import train_test_split
# Read the data set
df = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv",
na_values=['NA','?'])
# Generate dummies for job
df = pd.concat([df,pd.get_dummies(df['job'],prefix="job")],axis=1)
df.drop('job', axis=1, inplace=True)
# Generate dummies for area
df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1)
df.drop('area', axis=1, inplace=True)
# Generate dummies for product
df = pd.concat([df,pd.get_dummies(df['product'],prefix="product")],axis=1)
df.drop('product', axis=1, inplace=True)
# Missing values for income
med = df['income'].median()
df['income'] = df['income'].fillna(med)
# Standardize ranges
df['income'] = zscore(df['income'])
df['aspect'] = zscore(df['aspect'])
df['save_rate'] = zscore(df['save_rate'])
df['subscriptions'] = zscore(df['subscriptions'])
# Convert to numpy - Classification
x_columns = df.columns.drop('age').drop('id')
x = df[x_columns].values
y = df['age'].values
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
from sklearn.model_selection import KFold
# Keep a 10% holdout
x_main, x_holdout, y_main, y_holdout = train_test_split(
x, y, test_size=0.10)
# Cross-validate
kf = KFold(5)
oos_y = []
oos_pred = []
fold = 0
for train, test in kf.split(x_main):
fold+=1
print(f"Fold #{fold}")
x_train = x_main[train]
y_train = y_main[train]
x_test = x_main[test]
y_test = y_main[test]
model = Sequential()
model.add(Dense(20, input_dim=x.shape[1], activation='relu'))
model.add(Dense(5, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train,y_train,validation_data=(x_test,y_test),
verbose=0,epochs=500)
pred = model.predict(x_test)
oos_y.append(y_test)
oos_pred.append(pred)
# Measure this fold's RMSE
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print(f"Fold score (RMSE): {score}")
# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = np.sqrt(metrics.mean_squared_error(oos_pred,oos_y))
print()
print(f"Cross-validated score (RMSE): {score}")
# Evaluate the holdout set (using the model from the last fold)
holdout_pred = model.predict(x_holdout)
score = np.sqrt(metrics.mean_squared_error(holdout_pred,y_holdout))
print(f"Holdout score (RMSE): {score}")
```
# HLCA Figure 2
Here we will generate the figures for Figure 2 of the HLCA pre-print. Figure 2d was generated separately in R, using code from the integration benchmarking framework scIB.
### import modules, set paths and parameters:
```
import scanpy as sc
import pandas as pd
import numpy as np
import sys
import os
from collections import Counter
sys.path.append("../../scripts/")
import reference_based_harmonizing
import celltype_composition_plotting
import plotting
import sankey
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import to_hex
import ast
sc.set_figure_params(
dpi=140,
fontsize=12,
frameon=False,
transparent=True,
)
sns.set_style(style="white")
sns.set_context(context="paper")
```
for pretty code formatting (not needed to run notebook):
```
%load_ext lab_black
```
paths:
```
path_HLCA = "../../data/HLCA_core_h5ads/HLCA_v1.h5ad"
path_celltype_reference = "../../supporting_files/metadata_harmonization/HLCA_cell_type_reference_mapping_20211103.csv"
dir_figures = "../../figures"
```
## Generate figures:
initiate empty dictionary in which to store paper figures.
```
FIGURES = dict()
```
Load the HLCA core data:
```
adata = sc.read(path_HLCA)
```
#### Overview of stats (number of studies, cells, annotations etc.):
Number of studies, datasets, subjects, samples, cells:
```
print("Number of studies:", len(set(adata.obs.study)))
print("Number of datasets:", len(set(adata.obs.dataset)))
print("Number of subjects:", len(set(adata.obs.subject_ID)))
print("Number of samples:", len(set(adata.obs["sample"])))
print("Number of cells:", adata.obs.shape[0])
```
Proportions of cell compartments in the HLCA:
```
original_ann_lev_1_percs = np.round(
adata.obs.original_ann_level_1.value_counts() / adata.n_obs * 100, 1
)
print("Original annotation proportions (level 1):")
print(original_ann_lev_1_percs)
```
Perc. of cells annotated per level:
```
for level in range(1, 6):
n_unannotated = np.sum(
[
isnone or isnull
for isnone, isnull in zip(
adata.obs[f"original_ann_level_{level}_clean"].values == "None",
pd.isnull(adata.obs[f"original_ann_level_{level}_clean"].values),
)
]
)
n_annotated = adata.n_obs - n_unannotated
print(
f"Perc. originally annotated at level {level}: {round(n_annotated/adata.n_obs*100,1)}"
)
```
Distribution of demographics:
```
print(f"Min. and max. age: {adata.obs.age.min()}, {adata.obs.age.max()}")
adata.obs.sex.value_counts() / adata.n_obs * 100
adata.obs.ethnicity.value_counts() / adata.n_obs * 100
print(f"Min. and max. BMI: {adata.obs.BMI.min()}, {adata.obs.BMI.max()}")
adata.obs.smoking_status.value_counts() / adata.n_obs * 100
```
## figures:
Overview of subjects, samples, and cells per study (not in the paper):
```
plotting.plot_dataset_statistics(adata, fontsize=8, figheightscale=3.5)
```
### 2a Subject/sample distributions
Re-map ethnicities:
```
ethnicity_remapper = {
"asian": "asian",
"black": "black",
"latino": "latino",
"mixed": "mixed",
"nan": "nan",
"pacific islander": "other",
"white": "white",
}
adata.obs.ethnicity = adata.obs.ethnicity.map(ethnicity_remapper)
```
Plot subject demographic and sample anatomical location distributions:
```
FIGURES["2a_subject_and_sample_stats"] = plotting.plot_subject_and_sample_stats_incl_na(
adata, return_fig=True
)
```
### 2b Cell type composition sankey plot, level 1-3:
First, generate a color mapping. We want to map cell types from the same compartment in the same shade (e.g. epithelial orange/red, endothelial purple), at all levels. We'll need to incorporate our hierarchical cell type reference for that, and then calculate the colors per level. That is done with the code below:
```
harmonizing_df = reference_based_harmonizing.load_harmonizing_table(
path_celltype_reference
)
consensus_df = reference_based_harmonizing.create_consensus_table(harmonizing_df)
max_level = 5
color_prop_df = celltype_composition_plotting.calculate_hierarchical_coloring_df(
adata,
consensus_df,
max_level,
lev1_colormap_dict={
"Epithelial": "Oranges",
"Immune": "Greens",
"Endothelial": "Purples",
"Stroma": "Blues",
"Proliferating cells": "Reds",
},
ann_level_name_prefix="original_ann_level_",
)
```
Set minimum percentage among plotted cells for a cell type to be included. This prevents the plot from becoming overcrowded with labels and including lines that are too thin to even see:
```
min_ct_perc = 0.02
```
Now generate the two sankey plots.
```
fig, ax = plt.subplots(figsize=(8, 8))
cts_ordered_left_lev1 = [
ct
for ct in color_prop_df.l1_label
if ct in adata.obs.original_ann_level_1_clean.values
]
ct_to_color_lev1 = {
ct: col for ct, col in zip(color_prop_df.l1_label, color_prop_df.l1_rgba)
}
# get level 1 anns:
y_lev1 = adata.obs.original_ann_level_1_clean
lev1_percs = {ct: n / len(y_lev1) * 100 for ct, n in Counter(y_lev1).items()}
lev1_ct_to_keep = [ct for ct, perc in lev1_percs.items() if perc > min_ct_perc]
# get level 1 anns, set "None" in level 2 compartment specific,
# remove cell types that make up less than min_ct_perc of cells plotted
y_lev2 = adata.obs.original_ann_level_2_clean.cat.remove_unused_categories()
y_lev2 = [
f"{ct} ({lev1ann})" if ct == "None" else ct
for ct, lev1ann in zip(y_lev2, adata.obs.original_ann_level_1_clean)
]
lev2_percs = {ct: n / len(y_lev2) * 100 for ct, n in Counter(y_lev2).items()}
lev2_ct_to_keep = [ct for ct, perc in lev2_percs.items() if perc > min_ct_perc]
# plot sankey
sankey.sankey(
x=[
lev1
for lev1, lev2 in zip(y_lev1, list(y_lev2))
if lev1 in lev1_ct_to_keep and lev2 in lev2_ct_to_keep
],
y=[
lev2
for lev1, lev2 in zip(y_lev1, list(y_lev2))
if lev1 in lev1_ct_to_keep and lev2 in lev2_ct_to_keep
],
title="Hierarchical cell type annotation",
title_left="Level 1",
title_right="Level 2",
ax=ax,
fontsize="x-small",
left_order=cts_ordered_left_lev1,
colors={
ct: to_hex(ast.literal_eval(ct_to_color_lev1[ct]))
for ct in cts_ordered_left_lev1
},
alpha=0.8,
)
plt.show()
plt.close()
FIGURES["2b_sankey_1_2"] = fig
fig, ax = plt.subplots(figsize=(8, 8))
# use order from earlier sankey plot
cts_ordered_left_lev2 = [
ct
for ct in [
"Airway epithelium",
"Alveolar epithelium",
"Submucosal Gland",
"None (Epithelial)",
"Myeloid",
"Lymphoid",
"Megakaryocytic and erythroid",
"Granulocytes",
"Blood vessels",
"Lymphatic EC",
"None (Endothelial)",
"Fibroblast lineage",
"Smooth muscle",
"None (Stroma)",
"Mesothelium",
"None (Proliferating cells)",
]
if ct in lev2_ct_to_keep
]
# ct for ct in color_prop_df.l2_label if ct in adata.obs.ann_level_2_clean.values
# ]
ct_to_color_lev2 = {
ct: col for ct, col in zip(color_prop_df.l2_label, color_prop_df.l2_rgba)
}
# manually locate colors for "None" cell type annotations:
for none_ct in "Epithelial", "Endothelial", "Stroma", "Proliferating cells":
ct_to_color_lev2[f"None ({none_ct})"] = color_prop_df.loc[
color_prop_df.l1_label == none_ct, "l1_rgba"
].values[0]
y_lev3 = adata.obs.original_ann_level_3_clean
y_lev3 = [
f"{ct} ({lev1ann})" if ct.startswith("None") else ct
for ct, lev1ann in zip(y_lev3, adata.obs.original_ann_level_1_clean)
]
lev3_percs = {ct: n / len(y_lev3) * 100 for ct, n in Counter(y_lev3).items()}
lev3_ct_to_keep = [ct for ct, perc in lev3_percs.items() if perc > min_ct_perc]
sankey.sankey(
x=[
lev2
for lev2, lev3 in zip(y_lev2, list(y_lev3))
if lev2 in lev2_ct_to_keep and lev3 in lev3_ct_to_keep
],
y=[
lev3
for lev2, lev3 in zip(y_lev2, list(y_lev3))
if lev2 in lev2_ct_to_keep and lev3 in lev3_ct_to_keep
],
title="Hierarchical cell type annotation",
title_left="Level 2",
title_right="Level 3",
ax=ax,
fontsize=5, # "xx-small",
left_order=cts_ordered_left_lev2,
colors={
ct: to_hex(ast.literal_eval(ct_to_color_lev2[ct]))
for ct in cts_ordered_left_lev2
},
alpha=0.8,
)
plt.show()
plt.close()
FIGURES["2b_sankey_2_3"] = fig
```
### 2c Sample compositions:
In the paper we use ann level 2 and group by sample:
```
ann_level_number = "2"
grouping_covariate = "sample" # choose e.g. "dataset" or "subject_ID" or "sample"
```
Use the "clean" version, i.e. without forward-propagated labels for cells not annotated at the chosen label, but leaving those cells set to "None":
```
if ann_level_number == "1":
ann_level = "original_ann_level_" + ann_level_number
else:
ann_level = "original_ann_level_" + ann_level_number + "_clean"
```
Now plot:
```
FIGURES[
"2c_sample_compositions"
] = celltype_composition_plotting.plot_celltype_composition_per_sample(
adata,
ann_level_number,
color_prop_df,
return_fig=True,
title="original cell type annotations (level 2) per sample",
ann_level_name_prefix="original_ann_level_",
)
```
# Store figures
```
# for figname, fig in FIGURES.items():
# print("Saving", figname)
# fig.savefig(os.path.join(dir_figures, f"{figname}.png"), bbox_inches="tight", dpi=140)
```
# OOP Syntax Exercise - Part 2
Now that you've had some practice instantiating objects, it's time to write your own class from scratch. This lesson has two parts. In the first part, you'll write a Pants class. This class is similar to the shirt class with a couple of changes. Then you'll practice instantiating Pants objects.
In the second part, you'll write another class called SalesPerson. You'll also instantiate objects for the SalesPerson.
For this exercise, you can do all of your work in this Jupyter notebook. You will not need to import the class because all of your code will be in this Jupyter notebook.
Answers are also provided. If you click on the Jupyter icon, you can open a folder called 2.OOP_syntax_pants_practice, which contains this Jupyter notebook ('exercise.ipynb') and a file called answer.py.
# Pants class
Write a Pants class with the following characteristics:
* the class name should be Pants
* the class attributes should include
* color
* waist_size
* length
* price
* the class should have an init function that initializes all of the attributes
* the class should have two methods
* change_price() a method to change the price attribute
* discount() to calculate a discount
```
### TODO:
# - code a Pants class with the following attributes
# - color (string) eg 'red', 'yellow', 'orange'
# - waist_size (integer) eg 8, 9, 10, 32, 33, 34
# - length (integer) eg 27, 28, 29, 30, 31
# - price (float) eg 9.28
### TODO: Declare the Pants Class
### TODO: write an __init__ function to initialize the attributes
### TODO: write a change_price method:
# Args:
# new_price (float): the new price of the shirt
# Returns:
# None
### TODO: write a discount method:
# Args:
# discount (float): a decimal value for the discount.
# For example 0.05 for a 5% discount.
#
# Returns:
# float: the discounted price
class Pants:
"""The Pants class represents an article of clothing sold in a store
"""
def __init__(self, color, waist_size, length, price):
"""Method for initializing a Pants object
Args:
color (str)
waist_size (int)
length (int)
price (float)
Attributes:
color (str): color of a pants object
waist_size (str): waist size of a pants object
length (str): length of a pants object
price (float): price of a pants object
"""
self.color = color
self.waist_size = waist_size
self.length = length
self.price = price
def change_price(self, new_price):
"""The change_price method changes the price attribute of a pants object
Args:
new_price (float): the new price of the pants object
Returns: None
"""
self.price = new_price
def discount(self, percentage):
"""The discount method outputs a discounted price of a pants object
Args:
percentage (float): a decimal representing the amount to discount
Returns:
float: the discounted price
"""
return self.price * (1 - percentage)
class SalesPerson:
"""The SalesPerson class represents an employee in the store
"""
def __init__(self, first_name, last_name, employee_id, salary):
"""Method for initializing a SalesPerson object
Args:
first_name (str)
last_name (str)
employee_id (int)
salary (float)
Attributes:
first_name (str): first name of the employee
last_name (str): last name of the employee
employee_id (int): identification number of the employee
salary (float): yearly salary of the employee
pants_sold (list): a list of pants objects sold by the employee
total_sales (float): sum of all sales made by the employee
"""
self.first_name = first_name
self.last_name = last_name
self.employee_id = employee_id
self.salary = salary
self.pants_sold = []
self.total_sales = 0
def sell_pants(self, pants_object):
"""The sell_pants method appends a pants object to the pants_sold attribute
Args:
pants_object (obj): a pants object that was sold
Returns: None
"""
self.pants_sold.append(pants_object)
def display_sales(self):
"""The display_sales method prints out all pants that have been sold
Args: None
Returns: None
"""
for pants in self.pants_sold:
print('color: {}, waist_size: {}, length: {}, price: {}'\
.format(pants.color, pants.waist_size, pants.length, pants.price))
def calculate_sales(self):
"""The calculate_sales method sums the total price of all pants sold
Args: None
Returns:
float: sum of the price for all pants sold
"""
total = 0
for pants in self.pants_sold:
total += pants.price
self.total_sales = total
return total
def calculate_commission(self, percentage):
"""The calculate_commission method outputs the commission based on sales
Args:
percentage (float): the commission percentage as a decimal
Returns:
float: the commission due
"""
sales_total = self.calculate_sales()
return sales_total * percentage
```
# Run the code cell below to check results
If you run the next code cell and get an error, revise your code until the cell runs without raising any errors.
```
def check_results():
pants = Pants('red', 35, 36, 15.12)
assert pants.color == 'red'
assert pants.waist_size == 35
assert pants.length == 36
assert pants.price == 15.12
    pants.change_price(10)
assert pants.price == 10
assert pants.discount(.1) == 9
print('You made it to the end of the check. Nice job!')
check_results()
```
# SalesPerson class
The Pants class and Shirt class are quite similar. Here is an exercise to give you more practice writing a class. **This exercise is trickier than the previous exercises.**
Write a SalesPerson class with the following characteristics:
* the class name should be SalesPerson
* the class attributes should include
* first_name
* last_name
* employee_id
* salary
* pants_sold
* total_sales
* the class should have an init function that initializes all of the attributes
* the class should have four methods
 * sell_pants() a method to record the sale of a pants object
* calculate_sales() a method to calculate the sales
* display_sales() a method to print out all the pants sold with nice formatting
* calculate_commission() a method to calculate the salesperson commission based on total sales and a percentage
```
### TODO:
# Code a SalesPerson class with the following attributes
# - first_name (string), the first name of the salesperson
# - last_name (string), the last name of the salesperson
# - employee_id (int), the employee ID number like 5681923
# - salary (float), the monthly salary of the employee
# - pants_sold (list of Pants objects),
# pants that the salesperson has sold
# - total_sales (float), sum of sales of pants sold
### TODO: Declare the SalesPerson Class
### TODO: write an __init__ function to initialize the attributes
### Input Args for the __init__ function:
# first_name (str)
# last_name (str)
# employee_id (int)
# . salary (float)
#
# You can initialize pants_sold as an empty list
# You can initialize total_sales to zero.
#
###
### TODO: write a sell_pants method:
#
# This method receives a Pants object and appends
# the object to the pants_sold attribute list
#
# Args:
# pants (Pants object): a pants object
# Returns:
# None
### TODO: write a display_sales method:
#
# This method has no input or outputs. When this method
# is called, the code iterates through the pants_sold list
# and prints out the characteristics of each pair of pants
# line by line. The print out should look something like this
#
# color: blue, waist_size: 34, length: 34, price: 10
# color: red, waist_size: 36, length: 30, price: 14.15
#
#
#
###
### TODO: write a calculate_sales method:
# This method calculates the total sales for the sales person.
# The method should iterate through the pants_sold attribute list
# and sum the prices of the pants sold. The sum should be stored
# in the total_sales attribute and then return the total.
#
# Args:
# None
# Returns:
# float: total sales
#
###
### TODO: write a calculate_commission method:
#
# The salesperson receives a commission based on the total
# sales of pants. The method receives a percentage, and then
#   calculates the total sales of pants based on the price,
# and then returns the commission as (percentage * total sales)
#
# Args:
#   percentage (float): commission percentage as a decimal
#
# Returns:
# float: total commission
#
#
###
```
# Run the code cell below to check results
If you run the next code cell and get an error, revise your code until the cell runs without raising any errors.
```
def check_results():
pants_one = Pants('red', 35, 36, 15.12)
pants_two = Pants('blue', 40, 38, 24.12)
pants_three = Pants('tan', 28, 30, 8.12)
salesperson = SalesPerson('Amy', 'Gonzalez', 2581923, 40000)
assert salesperson.first_name == 'Amy'
assert salesperson.last_name == 'Gonzalez'
assert salesperson.employee_id == 2581923
assert salesperson.salary == 40000
assert salesperson.pants_sold == []
assert salesperson.total_sales == 0
salesperson.sell_pants(pants_one)
    assert salesperson.pants_sold[0] == pants_one
salesperson.sell_pants(pants_two)
salesperson.sell_pants(pants_three)
assert len(salesperson.pants_sold) == 3
assert round(salesperson.calculate_sales(),2) == 47.36
assert round(salesperson.calculate_commission(.1),2) == 4.74
print('Great job, you made it to the end of the code checks!')
check_results()
```
### Check display_sales() method
If you run the code cell below, you should get output similar to this:
```python
color: red, waist_size: 35, length: 36, price: 15.12
color: blue, waist_size: 40, length: 38, price: 24.12
color: tan, waist_size: 28, length: 30, price: 8.12
```
```
pants_one = Pants('red', 35, 36, 15.12)
pants_two = Pants('blue', 40, 38, 24.12)
pants_three = Pants('tan', 28, 30, 8.12)
salesperson = SalesPerson('Amy', 'Gonzalez', 2581923, 40000)
salesperson.sell_pants(pants_one)
salesperson.sell_pants(pants_two)
salesperson.sell_pants(pants_three)
salesperson.display_sales()
```
# Solution
As a reminder, answers are also provided. If you click on the Jupyter icon, you can open a folder called 2.OOP_syntax_pants_practice, which contains this Jupyter notebook and a file called answer.py.
| github_jupyter |
# YBIGTA ML PROJECT / 염정운
## Setting
```
import numpy as np
import pandas as pd
pd.set_option("max_columns", 999)
pd.set_option("max_rows", 999)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import seaborn as sns
import matplotlib.pyplot as plt
#sns.set(rc={'figure.figsize':(11.7,10)})
```
## Identity data
Variables in this table are identity information – network connection information (IP, ISP, Proxy, etc) and digital signature
<br>
(UA/browser/os/version, etc) associated with transactions.
<br>
They're collected by Vesta’s fraud protection system and digital security partners.
<br>
The field names are masked and a pairwise dictionary is not provided, for privacy protection and by contract agreement.
Categorical Features:
<br>
DeviceType
<br>
DeviceInfo
<br>
id12 - id38
```
#train_identity was awkward to work with, so I built a new DataFrame called i_merged that merges in isFraud and slightly reorders the columns. This is that code!
#i_merged = train_i.merge(train_t[['TransactionID', 'isFraud']], how = 'left', on = 'TransactionID')
#order_list =['TransactionID', 'isFraud', 'DeviceInfo', 'DeviceType', 'id_01', 'id_02', 'id_03', 'id_04', 'id_05', 'id_06', 'id_07', 'id_08',
# 'id_09', 'id_10', 'id_11', 'id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21',
# 'id_22', 'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_30', 'id_31', 'id_32', 'id_33', 'id_34',
# 'id_35', 'id_36', 'id_37', 'id_38']
#i_merged = i_merged[order_list]
#i_merged.head()
#i_merged.to_csv('identity_merged.csv', index = False)
save = pd.read_csv('identity_merged.csv')
i_merged = pd.read_csv('identity_merged.csv')
```
### <font color='blue'>NaN ratio</font>
```
nullrate = (((i_merged.isnull().sum() / len(i_merged)))*100).sort_values(ascending = False)
nullrate.plot(kind='barh', figsize=(15, 9))
i_merged.head()
```
### <font color='blue'>DeviceType</font>
The isFraud rate increases in the order nan (3.1%) < desktop (6.5%) < mobile (10.1%)
<br>
* In the full dataset, the proportion of isFraud = 1 is 7.8%
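A quick sketch to reproduce these rates (it assumes `i_merged` is loaded as above and that `DeviceType` has not been encoded yet):
```
# Overall fraud rate and fraud rate per DeviceType (NaN kept as its own group)
print('overall isFraud rate: {:.1%}'.format(i_merged['isFraud'].mean()))
print(i_merged.groupby(i_merged['DeviceType'].fillna('nan'))['isFraud'].mean())
```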
```
#DeviceType
i_merged.groupby(['DeviceType', 'isFraud']).size().unstack()
i_merged[i_merged.DeviceType.isnull()].groupby('isFraud').size()
```
### <font color='blue'>Null count in row</font>
No meaningful correlation was found between the amount of missing values in a row and isFraud
```
i_merged = i_merged.assign(NaN_count = i_merged.isnull().sum(axis = 1))
print(i_merged.assign(NaN_count = i_merged.isnull().sum(axis = 1)).groupby('isFraud')['NaN_count'].mean(),
i_merged.assign(NaN_count = i_merged.isnull().sum(axis = 1)).groupby('isFraud')['NaN_count'].std(),
i_merged.assign(NaN_count = i_merged.isnull().sum(axis = 1)).groupby('isFraud')['NaN_count'].min(),
i_merged.assign(NaN_count = i_merged.isnull().sum(axis = 1)).groupby('isFraud')['NaN_count'].max())
#isFraud = 1
i_merged[i_merged.isFraud == 1].hist('NaN_count')
#isFraud = 0
i_merged[i_merged.isFraud == 0].hist('NaN_count')
i_merged.head()
```
### <font color='blue'>Per-variable EDA - Continuous</font>
```
#Correlation Matrix
rs = np.random.RandomState(0)
df = pd.DataFrame(rs.rand(10, 10))
corr = i_merged.corr()
corr.style.background_gradient(cmap='coolwarm')
#id_01 : takes values of 0 or below and is skewed. A log transform could be applied if needed.
i_merged.id_01.plot(kind='hist', bins=22, figsize=(12,6), title='id_01 dist.')
print(i_merged.groupby('isFraud')['id_01'].mean(),
i_merged.groupby('isFraud')['id_01'].std(),
i_merged.id_01.min(),
i_merged.id_01.max(), sep = '\n')
Fraud = (i_merged[i_merged.isFraud == 1]['id_01'])
notFraud = i_merged[i_merged.isFraud == 0]['id_01']
plt.hist([Fraud, notFraud],bins = 5, label=['Fraud', 'notFraud'])
plt.legend(loc='upper left')
plt.show()
#id_02: has a minimum value of 1 and is skewed. A log transform can likewise be applied.
i_merged.id_02.plot(kind='hist', bins=22, figsize=(12,6), title='id_02 dist.')
print(i_merged.groupby('isFraud')['id_02'].mean(),
i_merged.groupby('isFraud')['id_02'].std(),
i_merged.id_02.min(),
i_merged.id_02.max(), sep = '\n')
Fraud = (i_merged[i_merged.isFraud == 1]['id_02'])
notFraud = i_merged[i_merged.isFraud == 0]['id_02']
plt.hist([Fraud, notFraud],bins = 5, label=['Fraud', 'notFraud'])
plt.legend(loc='upper left')
plt.show()
#id_05
i_merged.id_05.plot(kind='hist', bins=22, figsize=(9,6), title='id_05 dist.')
print(i_merged.groupby('isFraud')['id_05'].mean(),
i_merged.groupby('isFraud')['id_05'].std())
Fraud = (i_merged[i_merged.isFraud == 1]['id_05'])
notFraud = i_merged[i_merged.isFraud == 0]['id_05']
plt.hist([Fraud, notFraud],bins = 10, label=['Fraud', 'notFraud'])
plt.legend(loc='upper left')
plt.show()
#id_06
i_merged.id_06.plot(kind='hist', bins=22, figsize=(12,6), title='id_06 dist.')
print(i_merged.groupby('isFraud')['id_06'].mean(),
i_merged.groupby('isFraud')['id_06'].std())
Fraud = (i_merged[i_merged.isFraud == 1]['id_06'])
notFraud = i_merged[i_merged.isFraud == 0]['id_06']
plt.hist([Fraud, notFraud],bins = 20, label=['Fraud', 'notFraud'])
plt.legend(loc='upper left')
plt.show()
#id_11
i_merged.id_11.plot(kind='hist', bins=22, figsize=(12,6), title='id_11 dist.')
print(i_merged.groupby('isFraud')['id_11'].mean(),
i_merged.groupby('isFraud')['id_11'].std())
Fraud = (i_merged[i_merged.isFraud == 1]['id_11'])
notFraud = i_merged[i_merged.isFraud == 0]['id_11']
plt.hist([Fraud, notFraud],bins = 20, label=['Fraud', 'notFraud'])
plt.legend(loc='upper left')
plt.show()
```
### <font color='blue'>Per-variable EDA - Categorical</font>
```
sns.jointplot(x = 'id_09', y = 'id_03', data = i_merged)
```
### <font color='blue'>Feature Engineering</font>
<br>
<br>
** For categorical variables with many distinct values, the encoding is 1 when information is present and 0 otherwise. This was chosen to get a base model running; as the preprocessing is refined, these variables may need to be handled differently.
<br>
** Some columns come in pairs: id03/04, id05/06, id07/08 and id21-26, id09/10 :: they either have data together (1) or are NaN together (0). The categorical EDA suggests id03 and id09 are correlated, so no additional transformation was applied to them.
<br>
** Per-variable EDA visualizations were referenced from https://www.kaggle.com/pablocanovas/exploratory-analysis-tidyverse ; excluding NaN, categories are assigned 1, 2, ... starting from the one with the lowest Fraud rate.
<br>
<br>
<br>
### $Continuous Features$
<br>
id01:: no missing values; a log transform makes it positive and scales it. Since the values are multiples of 5, dividing by 5 is another scaling worth trying.
<br>
id02:: has missing values; a log transform makes the distribution close to normal and scales down its very large values. The missing values were filled randomly, which is the riskiest kind of imputation, so caution is needed.
<br>
<br>
<br>
### $Categorical Features$
<br>
DeviceType:: {NaN: 0, 'desktop': 1, 'mobile': 2}
<br>
DeviceInfo:: {NaN: 0, has value: 1}
<br>
id12::{0:0, 'Found': 1, 'NotFound': 2}
<br>
id13::{NaN: 0, has value: 1}
<br>
id14::{NaN: 0, has value: 1}
<br>
id15::{NaN:0, 'New':1, 'Unknown':2, 'Found':3} #id15 and id16 appear related
<br>
id16::{NaN:0, 'NotFound':1, 'Found':2}
<br>
id17::{NaN: 0, has value: 1}
<br>
id18::{NaN: 0, has value: 1} #relatively few distinct values
<br>
id19::{NaN: 0, has value: 1}
<br>
id20::{NaN: 0, has value: 1} #id17, id19, id20 are a pair
<br>
id21
<br>
id22
<br>
id23::{IP_PROXY:ANONYMOUS:2, else:1, nan:0} #id07, id08 and id21-26 are a pair. Only ANONYMOUS has a notably high Fraud rate, so it is kept; for now only id_23 is used in the base model
<br>
id24
<br>
id25
<br>
id26
<br>
id27:: {NaN:0, 'NotFound':1, 'Found':2}
<br>
id28:: {0:0, 'New':1, 'Found':2}
<br>
id29:: {0:0, 'NotFound':1, 'Found':2}
<br>
id30 (OS):: {NaN: 0, has value: 1}; treated as present/absent, but if conditions such as the higher fraud rate for Safari Generic need to be considered, different preprocessing will be required
<br>
id31 (browser):: {NaN: 0, has value: 1}, same as id30
<br>
id32::{nan:0, 24:1, 32:2, 16:3, 0:4}
<br>
id33 (resolution)::{NaN: 0, has value: 1}
<br>
id34:: {nan:0, matchstatus=-1:1, matchstatus=0:2, matchstatus=1:3, matchstatus=2:4}; if matchstatus is -1 the probability of fraud is very low
<br>
id35:: {NaN:0, 'T':1, 'F':2}
<br>
id36:: {NaN:0, 'T':1, 'F':2}
<br>
id37:: {NaN:0, 'T':2, 'F':1}
<br>
id38:: {NaN:0, 'T':1, 'F':2}
<br>
```
#Continous Features
i_merged.id_01 = np.log(-i_merged.id_01 + 1)
i_merged.id_02 = np.log(i_merged.id_02)
medi = i_merged.id_02.median()
i_merged.id_02 = i_merged.id_02.fillna(medi)
i_merged.id_02.hist()
#Fill the NaN values of id_02 randomly
#i_merged['id_02_filled'] = i_merged['id_02']
#temp = (i_merged['id_02'].dropna()
# .sample(i_merged['id_02'].isnull().sum())
# )
#temp.index = i_merged[lambda x: x.id_02.isnull()].index
#i_merged.loc[i_merged['id_02'].isnull(), 'id_02_filled'] = temp
#Categorical Features
i_merged.DeviceType = i_merged.DeviceType.fillna(0).map({0:0, 'desktop': 1, 'mobile': 2})
i_merged.DeviceInfo = i_merged.DeviceInfo.notnull().astype(int)
i_merged.id_12 = i_merged.id_12.fillna(0).map({0:0, 'Found': 1, 'NotFound': 2})
i_merged.id_13 = i_merged.id_13.notnull().astype(int)
i_merged.id_14 = i_merged.id_14.notnull().astype(int)
i_merged.id_15 = i_merged.id_15.fillna(0).map({0:0, 'New':1, 'Unknown':2, 'Found':3})
i_merged.id_16 = i_merged.id_16.fillna(0).map({0:0, 'NotFound':1, 'Found':2})
i_merged.id_17 = i_merged.id_17.notnull().astype(int)
i_merged.id_18 = i_merged.id_18.notnull().astype(int)
i_merged.id_19 = i_merged.id_19.notnull().astype(int)
i_merged.id_20 = i_merged.id_20.notnull().astype(int)
i_merged.id_23 = i_merged.id_23.fillna('temp').map({'temp':0, 'IP_PROXY:ANONYMOUS':2}).fillna(1)
i_merged.id_27 = i_merged.id_27.fillna(0).map({0:0, 'NotFound':1, 'Found':2})
i_merged.id_28 = i_merged.id_28.fillna(0).map({0:0, 'New':1, 'Found':2})
i_merged.id_29 = i_merged.id_29.fillna(0).map({0:0, 'NotFound':1, 'Found':2})
i_merged.id_30 = i_merged.id_30.notnull().astype(int)
i_merged.id_31 = i_merged.id_31.notnull().astype(int)
i_merged.id_32 = i_merged.id_32.fillna('temp').map({'temp':0, 24:1, 32:2, 16:3, 0:4})
i_merged.id_33 = i_merged.id_33.notnull().astype(int)
i_merged.id_34 = i_merged.id_34.fillna('temp').map({'temp':0, 'match_status:-1':1, 'match_status:0':3, 'match_status:1':4, 'match_status:2':2})
i_merged.id_35 = i_merged.id_35.fillna(0).map({0:0, 'T':1, 'F':2})
i_merged.id_36 = i_merged.id_36.fillna(0).map({0:0, 'T':1, 'F':2})
i_merged.id_37 = i_merged.id_37.fillna(0).map({0:0, 'T':2, 'F':1})
i_merged.id_38 = i_merged.id_38.fillna(0).map({0:0, 'T':1, 'F':2})
```
Identity_Device FE
```
i_merged['Device_info_clean'] = i_merged['DeviceInfo']
i_merged['Device_info_clean'] = i_merged['Device_info_clean'].fillna('unknown')
def name_divide(name):
if name == 'Windows':
return 'Windows'
elif name == 'iOS Device':
return 'iOS Device'
elif name == 'MacOS':
return 'MacOS'
elif name == 'Trident/7.0':
return 'Trident/rv'
elif "rv" in name:
return 'Trident/rv'
elif "SM" in name:
return 'SM/moto/lg'
  # normalize Samsung / LG / Motorola variants to the single 'SM/moto/lg' bucket used in the maps below
  elif name == 'SAMSUNG':
    return 'SM/moto/lg'
  elif 'LG' in name:
    return 'SM/moto/lg'
  elif 'Moto' in name:
    return 'SM/moto/lg'
elif name == 'unknown':
return 'unknown'
else:
return 'others'
i_merged['Device_info_clean'] = i_merged['Device_info_clean'].apply(name_divide)
i_merged['Device_info_clean'].value_counts()
```
### <font color='blue'>Identity feature-engineered dataset</font>
```
i_merged.columns
selected = []
selected.extend(['TransactionID', 'isFraud', 'id_01', 'id_02', 'DeviceType','Device_info_clean'])
id_exist = i_merged[selected].assign(Exist = 1)
id_exist.DeviceType.fillna('unknown', inplace = True)
id_exist.to_csv('identity_first.csv',index = False)
```
### <font color='blue'>Test: Decision Tree / Random Forest Test</font>
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, roc_auc_score
X = id_exist.drop(['isFraud'], axis = 1)
Y = id_exist['isFraud']
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
tree_clf = DecisionTreeClassifier(max_depth=10)
tree_clf.fit(X_train, y_train)
pred = tree_clf.predict(X_test)
print('F1:{}'.format(f1_score(y_test, pred)))
```
--------------------------
```
param_grid = {
'max_depth': list(range(10,51,10)),
'n_estimators': [20, 20, 20]
}
rf = RandomForestClassifier()
gs = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 5, n_jobs = -1, verbose = 2)
gs.fit(X_train,y_train)
best_rf = gs.best_estimator_
print('best parameter: \n',gs.best_params_)
y_pred = best_rf.predict(X_test)
print('Accuracy:{}'.format(accuracy_score(y_test, y_pred)),
'Precision:{}'.format(precision_score(y_test, y_pred)),
'Recall:{}'.format(recall_score(y_test, y_pred)),
'F1:{}'.format(f1_score(y_test, y_pred)),
'ROC_AUC:{}'.format(roc_auc_score(y_test, y_pred)), sep = '\n')
```
-----------------------
### <font color='blue'>Transaction + ID merge</font>
```
transaction_c = pd.read_csv('train_combined.csv')
id_c = pd.read_csv('identity_first.csv')
region = pd.read_csv('region.csv')
country = region[['TransactionID', 'Country_code']]
country.head()
f_draft = transaction_c.merge(id_c.drop(['isFraud'], axis = 1) ,how = 'left', on = 'TransactionID')
f_draft.drop('DeviceInfo', axis = 1, inplace = True)
f_draft = f_draft.merge(country, how = 'left', on = 'TransactionID')
f_draft.head()
f_draft.dtypes
```
Categorical: 'ProductCD', 'card4', 'card6', 'D15', 'DeviceType', 'Device_info_clean'
```
print(
f_draft.ProductCD.unique(),
f_draft.card4.unique(),
f_draft.card6.unique(),
f_draft.D15.unique(),
f_draft.DeviceType.unique(),
f_draft.Device_info_clean.unique(),
)
print(map_ProductCD, map_card4,map_card6,map_D15, sep = '\n')
```
map_ProductCD = {'W': 0, 'H': 1, 'C': 2, 'S': 3, 'R': 4}
<br>
map_card4 = {'discover': 0, 'mastercard': 1, 'visa': 2, 'american express': 3}
<br>
map_card6 = {'credit': 0, 'debit': 1, 'debit or credit': 2, 'charge card': 3}
<br>
map_D15 = {'credit': 0, 'debit': 1, 'debit or credit': 2, 'charge card': 3}
<br>
map_DeviceType = {'mobile':2, 'desktop':1, 'unknown':0}
<br>
map_Device_info_clean = {'SM/moto/lg':1, 'iOS Device':2, 'Windows':3, 'unknown':0, 'MacOS':4, 'others':5,
'Trident/rv':6}
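The mappings above are listed only as markdown, but the next cell uses them as Python variables. Here is a minimal sketch defining them, with the values copied from the list above (note that `map_D15` is reproduced as written and looks copied from `map_card6`, so it should be checked against the actual `D15` values):
```
# Minimal sketch: define the label maps listed above as Python dicts so the next cell can run.
map_ProductCD = {'W': 0, 'H': 1, 'C': 2, 'S': 3, 'R': 4}
map_card4 = {'discover': 0, 'mastercard': 1, 'visa': 2, 'american express': 3}
map_card6 = {'credit': 0, 'debit': 1, 'debit or credit': 2, 'charge card': 3}
map_D15 = {'credit': 0, 'debit': 1, 'debit or credit': 2, 'charge card': 3}  # verify against real D15 values
map_DeviceType = {'mobile': 2, 'desktop': 1, 'unknown': 0}
map_Device_info_clean = {'SM/moto/lg': 1, 'iOS Device': 2, 'Windows': 3, 'unknown': 0,
                         'MacOS': 4, 'others': 5, 'Trident/rv': 6}
```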
```
f_draft.ProductCD = f_draft.ProductCD.map(map_ProductCD)
f_draft.card4 = f_draft.card4.map(map_card4)
f_draft.card6 = f_draft.card6.map(map_card6)
f_draft.D15 = f_draft.D15.map(map_D15)
f_draft.DeviceType = f_draft.DeviceType.map(map_DeviceType)
f_draft.Device_info_clean = f_draft.Device_info_clean.map(map_Device_info_clean)
f_draft.to_csv('transaction_id_combined(no_label_encoded).csv', index = False)
f_draft.ProductCD = f_draft.ProductCD.astype('category')
f_draft.card4 = f_draft.card4.astype('category')
f_draft.card6 = f_draft.card6.astype('category')
f_draft.card1 = f_draft.card1.astype('category')
f_draft.card2 = f_draft.card2.astype('category')
f_draft.card3 = f_draft.card3.astype('category')
f_draft.card5 = f_draft.card5.astype('category')
f_draft.D15 = f_draft.D15.astype('category')
f_draft.DeviceType = f_draft.DeviceType.astype('category')
f_draft.Device_info_clean = f_draft.Device_info_clean.astype('category')
f_draft.Country_code = f_draft.Country_code.astype('category')
f_draft.card1 = f_draft.card1.astype('category')
f_draft.card2 = f_draft.card2.astype('category')
f_draft.card3 = f_draft.card3.astype('category')
f_draft.card5 = f_draft.card5.astype('category')
f_draft.dtypes
f_draft.to_csv('transaction_id_combined.csv', index = False)
f_draft.head()
```
| github_jupyter |
```
import glob
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
```
# README
This notebook extracts some information about fitting. For each molecule, it creates a CSV file.
It calculates the Euclidean distance and topological distance (number of bonds separating an atom and the halogen).
```
def parsePrepAc(prep_ac):
# read file content
with open(prep_ac) as stream:
lines = stream.readlines()
# browse file content
atoms = {}
bonds = []
ref_at_name = None
for line in lines:
l_spl = line.split()
# skip short
if len(l_spl) == 0:
continue
# save atom
if l_spl[0] == "ATOM":
at_id = int(l_spl[1])
at_name = l_spl[2]
at_type = l_spl[-1]
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
atoms[at_name] = [at_id, at_type, np.array((x, y, z))]
if "I" in at_name or "Cl" in at_name or "Br" in at_name:
ref_at_name = at_name
continue
if l_spl[0] == "BOND":
at_name1 = l_spl[-2]
at_name2 = l_spl[-1]
bonds.append([at_name1, at_name2])
return atoms, bonds, ref_at_name
def getNBDistances(atoms, bonds, ref_at_name):
distances = []
for atom in atoms:
distance = findShortestNBDistance(atom, bonds, ref_at_name)
distances.append(distance)
return distances
def findShortestNBDistance(atom, bonds, ref_atom):
dist = 0
starts = [atom]
while True:
ends = []
for start in starts:
if start == ref_atom:
return dist
for bond in bonds:
if start in bond:
end = [i for i in bond if i != start][0]
ends.append(end)
starts = ends
dist += 1
def getEuclideanDistances(atoms, ref_at_name):
distances = []
coords_ref = atoms[ref_at_name][2]
for at_name, at_values in atoms.items():
at_id, at_type, coords = at_values
distance = np.linalg.norm(coords_ref - coords)
distances.append(distance)
return distances
def getChargesFromPunch(punch, n_atoms, sigma=False):
# initialize output container
charges = []
# read file content
with open(punch) as stream:
lines = stream.readlines()
# define, where to find atoms and charges
lines_start = 11
lines_end = lines_start + n_atoms
if sigma:
lines_end += 1
# browse selected lines and save charges
for line in lines[lines_start:lines_end]:
l_spl = line.split()
charge = float(l_spl[3])
charges.append(charge)
return charges
def sortAtoms(atoms):
at_names = list(atoms.keys())
at_ids = [i[0] for i in atoms.values()]
at_types = [i[1] for i in atoms.values()]
atoms_unsorted = list(zip(at_names, at_ids, at_types))
atoms_sorted = sorted(atoms_unsorted, key=lambda x: x[1])
at_names_sorted = [a[0] for a in atoms_sorted]
at_types_sorted = [a[2] for a in atoms_sorted]
return at_names_sorted, at_types_sorted
for halogen in "chlorine bromine iodine".split():
mols = sorted(glob.glob(f"../{halogen}/ZINC*"))
for mol in mols:
# get info about atoms and bonds
prep_ac = mol + "/antechamber/ANTECHAMBER_PREP.AC"
atoms, bonds, ref_at_name = parsePrepAc(prep_ac)
n_atoms = len(atoms)
# number-of-bond distance from the halogen
nb_distances = getNBDistances(atoms, bonds, ref_at_name)
        # euclidean distances from the halogen
distances = getEuclideanDistances(atoms, ref_at_name)
# standard RESP charges
punch_std = mol + "/antechamber/punch"
qs_std = getChargesFromPunch(punch_std, n_atoms)
# modified RESP charges including sigma-hole
punch_mod = mol + "/mod2/punch"
qs_mod = getChargesFromPunch(punch_mod, n_atoms, sigma=True)
# correct sorting of atoms
atom_names_sorted, atom_types_sorted = sortAtoms(atoms)
# output dataframe
df = pd.DataFrame({"name": atom_names_sorted + ["X"],
"type": atom_types_sorted + ["x"],
"nb_distance": nb_distances + [-1],
"distance": distances + [-1],
"q_std": qs_std + [0],
"q_mod": qs_mod})
# save
df.to_csv(mol + "/overview.csv", index=False)
"done"
df
```
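As a quick sanity check, here is a usage sketch of the two distance helpers on a made-up three-atom fragment (the atom names, types, coordinates, and bonds below are purely hypothetical):
```
# Toy example (hypothetical data): a C1-C2-Cl1 chain along the x axis
toy_atoms = {
    "C1":  [1, "c3", np.array((0.0, 0.0, 0.0))],
    "C2":  [2, "c3", np.array((1.5, 0.0, 0.0))],
    "Cl1": [3, "cl", np.array((3.0, 0.0, 0.0))],
}
toy_bonds = [["C1", "C2"], ["C2", "Cl1"]]
print(findShortestNBDistance("C1", toy_bonds, "Cl1"))   # 2 bonds away from the halogen
print(getEuclideanDistances(toy_atoms, "Cl1"))          # [3.0, 1.5, 0.0]
```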
| github_jupyter |
<a href="https://colab.research.google.com/github/Pager07/A-Hackers-AI-Voice-Assistant/blob/master/DataCleansingAndEda.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#Load Data
```
import pandas as pd
import numpy as np
isMergedDatasetAvailabel = True
if not isMergedDatasetAvailabel:
train_bodies_df = pd.read_csv('train_bodies.csv')
train_stance_df = pd.read_csv('train_stances.csv')
test_bodies_df = pd.read_csv('competition_test_bodies.csv')
test_stance_df = pd.read_csv('competition_test_stances.csv')
#merge the training dataframe
train_merged = pd.merge(train_stance_df,train_bodies_df,on='Body ID',how='outer')
test_merged = pd.merge(test_stance_df,test_bodies_df,on='Body ID', how='outer')
else:
train_merged = pd.read_csv('train_merged.csv',index_col=0)
test_merged = pd.read_csv('test_merged.csv',index_col=0)
train_merged.head()
test_merged.head()
```
#Data Cleaning
```
import re
import numpy as np
from sklearn import feature_extraction
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
#downloads
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
wnl= nltk.WordNetLemmatizer()
def normalize(word):
'''
Helper function fo get_normalized_tokens()
Takes a word and lemmatizes it eg. bats -> bat
Args:
word: str
'''
return wnl.lemmatize(word,wordnet.VERB).lower()
def get_normalized_tokens(seq):
'''
Takes a sentence and returns normalized tokens
Args:
    seq: str, A sentence
'''
normalized_tokens = []
for token in nltk.word_tokenize(seq):
normalized_tokens.append(normalize(token))
return normalized_tokens
def clean(seq):
'''
  Takes a sentence and removes emojis and other non-alphanumeric characters
  Args:
    seq: str, A sentence
'''
valid = re.findall(r'\w+', seq, flags=re.UNICODE)
seq = ' '.join(valid).lower()
return seq
def remove_stopwords(token_list):
'''
Args:
token_list: List, containg tokens
'''
filtered_token_list = []
for w in token_list:
if w not in feature_extraction.text.ENGLISH_STOP_WORDS:
filtered_token_list.append(w)
return filtered_token_list
def preprocess(sentence):
'''
  This function takes in a raw body sentence|title and returns the preprocessed sentence
'''
#Remove non-alphabatically, non-numerical,emojis etc..
sentence = clean(sentence)
#(normalization/lemmatization)
tokens = get_normalized_tokens(sentence)
#remove any stopwords
tokens = remove_stopwords(tokens)
sentence = ' '.join(tokens)
return sentence
train_merged['articleBody']= train_merged['articleBody'].apply(preprocess)
test_merged['articleBody'] = test_merged['articleBody'].apply(preprocess)
train_merged['Headline']=train_merged['Headline'].apply(preprocess)
test_merged['Headline']= test_merged['Headline'].apply(preprocess)
train_merged.to_csv('train_merged.csv')
test_merged.to_csv('test_merged.csv')
```
#EDA
```
import matplotlib.pyplot as plt
import seaborn as sns

def get_top_trigrams(corpus, n=10):
    vec = CountVectorizer(ngram_range=(3, 3)).fit(corpus)  # ngram_range=(3, 3) extracts trigrams
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
#first let us check the trigrams of the headlines
plt.figure(figsize=(10, 5))
top_headline_trigrams = get_top_trigrams(train_merged['Headline'],n=20)
y, x = map(list, zip(*top_headline_trigrams))
sns.barplot(x=x, y=y)
plt.title('Trigrams (Headline)')
#and the trigrams of the article bodies
plt.figure(figsize=(10, 5))
top_body_trigrams = get_top_trigrams(train_merged['articleBody'],n=20)
y, x = map(list, zip(*top_body_trigrams))
sns.barplot(x=x, y=y)
plt.title('Trigrams (articleBody)')
word = 'plays'
out = normalize(word)
assert out == 'play'
text ='hello I #like to eatsfood 123'
out = get_normalized_tokens(text)
assert out == ['hello', 'i', '#', 'like', 'to', 'eatsfood','123']
text ='. hello I #like to eatsfood 123 -+~@:%^&www.*😔😔'
out = clean(text);out
assert out == 'hello i like to eatsfood 123 www'
token_list = ['hello', 'i', '#', 'like', 'to', 'eatsfood','123']
out = remove_stopwords(token_list);
assert out == ['hello', '#', 'like', 'eatsfood', '123']
text ='. hello bats,cats, alphakenny I am #like to eatsfood 123 -+~@:%^&www.*😔😔'
out = preprocess(text); out
#Very imbalanced
train_merged['Stance'].hist()
test_merged['Stance'].hist()
lens = train_merged['Headline'].str.len()
lens.mean(), lens.std(), lens.max()
lens = test_merged['Headline'].str.len()
lens.mean(), lens.std(), lens.max()
#The lengths seem to vary a lot
lens = train_merged['articleBody'].str.len()
lens.mean(), lens.std(), lens.max()
lens = test_merged['articleBody'].str.len()
lens.mean(), lens.std(), lens.max()
```
#1.a tf-idf feature extraction
```
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack
totaldata= (train_merged['articleBody'].tolist() + train_merged['Headline'].tolist()+test_merged['articleBody'].tolist()+test_merged['Headline'].tolist())
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=80, stop_words='english')
tfidf_vect.fit(totaldata)
print('===Starting train headline====')
train_head_feature= tfidf_vect.transform(train_merged['Headline']) #(49972, 80)
print('===Starting Train body====')
train_body_feature= tfidf_vect.transform(train_merged['articleBody']) #(49972, 80)
print('===Starting Test headline====')
test_head_feature= tfidf_vect.transform(test_merged['Headline']) #(25413, 80)
print('===Starting Test articleBody====')
test_body_feature = tfidf_vect.transform(test_merged['articleBody']) #(25413, 80)
def binary_labels(label):
if label in ['discuss', 'agree', 'disagree']:
return 'related'
elif label in ['unrelated']:
return label
else:
    raise ValueError(f'{label} not found!')
train_merged_labels = train_merged['Stance'].apply(binary_labels)
test_merged_labels = test_merged['Stance'].apply(binary_labels)
print(train_merged_labels.unique(), test_merged_labels.unique())
X_train_tfidf,Y_train = hstack([train_head_feature,train_body_feature]).toarray(), train_merged_labels.values
X_test_tfidf,Y_test = hstack([test_head_feature,test_body_feature]).toarray(), test_merged_labels.values
```
#Train with tf-idf features - Naive Bayes
```
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn import metrics
from sklearn.metrics import confusion_matrix,accuracy_score,roc_auc_score,roc_curve,auc,f1_score
from sklearn.preprocessing import LabelEncoder
def binary_labels(label):
if label in ['discuss', 'agree', 'disagree']:
return 'related'
elif label in ['unrelated']:
return label
else:
    raise ValueError(f'{label} not found!')
train_merged_labels = train_merged['Stance'].apply(binary_labels)
test_merged_labels = test_merged['Stance'].apply(binary_labels)
print(train_merged_labels.unique(), test_merged_labels.unique())
X_train_tfidf,Y_train = hstack([train_head_feature,train_body_feature]).toarray(), train_merged_labels.values
X_test_tfidf,Y_test = hstack([test_head_feature,test_body_feature]).toarray(), test_merged_labels.values
print(X_train_tfidf.shape,X_test_tfidf.shape )
train_merged['Stance'].unique()
net = MultinomialNB(alpha=0.39)
net.fit(X_train_tfidf, Y_train)
print("train score:", net.score(X_train_tfidf, Y_train))
print("validation score:", net.score(X_test_tfidf, Y_test))
import matplotlib.pyplot as plt
import seaborn as sn
plt.style.use('ggplot')
# Create the confusion matrix
def plot_confussion_matrix(y_test, y_pred):
    ''' Plot the confusion matrix for the target labels and predictions '''
    cm = confusion_matrix(y_test, y_pred)
    # Create a dataframe with the confusion matrix values
    df_cm = pd.DataFrame(cm, range(cm.shape[0]),
                  range(cm.shape[1]))
    # Plot the confusion matrix
sn.set(font_scale=1.4) #for label size
sn.heatmap(df_cm, annot=True,fmt='.0f',cmap="YlGnBu",annot_kws={"size": 10})# font size
plt.show()
# ROC Curve
# plot no skill
# Calculate the points in the ROC curve
def plot_roc_curve(y_test, y_pred):
''' Plot the ROC curve for the target labels and predictions'''
enc = LabelEncoder()
y_test = enc.fit_transform(y_test)
y_pred = enc.fit_transform(y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred, pos_label=1)
roc_auc= auc(fpr,tpr)
plt.figure(figsize=(12, 12))
ax = plt.subplot(121)
ax.set_aspect(1)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# Predicting the Test set results
prediction = net.predict(X_test_tfidf)
#print the classification report to highlight the accuracy with f1-score, precision and recall
print(metrics.classification_report(prediction, Y_test))
plot_confussion_matrix(prediction, Y_test)
plot_roc_curve(prediction, Y_test)
```
#TF-idf Binary classification with logistic regression
Steps:
- Create a k-fold stratified dataloader [x]
  - Use a Sampler to have more control over the batch
  - Write a function get_dataloaders() that returns a dict of shape fold_id x tuple, where the tuple contains the train and test dataloaders
- Train the model on all the splits [x]
  - How?
    - Write a function that trains a single fold
      - It takes the train_loader and test_loader of that split
        - These loaders can be accessed through get_dataloaders()
- Evaluate the model
  - Do we need to evaluate the model after each epoch?
    - Yes, we do
      - Print the stats
      - Track the stats
      - Use the tracked stats of shape (fold x stats) to generate global stats
  - What are the stats, i.e. what are we using to measure performance?
    - Accuracy and F-score?
      - The class-wise and the macro-averaged F1 scores
        - These metrics are not affected by the large size of the majority class.
      - What is the class-wise F1 score?
        - The harmonic mean of precision and recall for each class
      - What is the F1m metric?
        - The macro F1 score
          - What is the macro F1 score?
            - Compute the F1 score per class, then average across the classes
          - How can we get this score?
            - Use the sklearn classification report
              - set output_dict=1
              - out['macro avg']['f1-score']
              - out['macro avg']['accuracy']
  - How will I know if the model is overfitting?
    - Calculate the test loss
- At last I can send the whole test set for classification
  - then plot the ROC curve
  - and the confusion matrix
- What about the class weights? (see the sketch after this list)
  - FNC-1 paper:
    - 0.25 reward for correctly classifying a related (R) example
    - 1 - 0.25 = 0.75 (extra penalty)
    - Total penalty: 1 + 0.75
    - 0.25 reward for correctly classifying an unrelated (UR) example
- Train the model
  - Load the dataset
    - load the csv
    - load X_train, Y_train
    - load X_test, Y_test
    - Send them to the GPU
    - train
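The class-weight note above maps onto the weights used later in training. Below is a small sketch (my interpretation, not code from the original pipeline) showing how `torch.tensor([1.75, 1])` lines up with `LabelEncoder`'s alphabetical class order:
```
# Sketch: LabelEncoder sorts classes alphabetically, so 'related' -> 0 and 'unrelated' -> 1.
# The CrossEntropyLoss weights used later, torch.tensor([1.75, 1]), therefore penalise a
# misclassified 'related' example 1.75x as much as an 'unrelated' one (the 1 + 0.75 from the FNC-1 note).
from sklearn.preprocessing import LabelEncoder
import torch

enc = LabelEncoder().fit(['related', 'unrelated'])
print(dict(zip(enc.classes_, enc.transform(enc.classes_))))  # {'related': 0, 'unrelated': 1}
class_weights = torch.tensor([1.75, 1.0])                    # weight per class index
print(class_weights)
```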
```
from torch.utils.data import DataLoader,Dataset
import torch
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import ConcatDataset,SubsetRandomSampler
from collections import defaultdict
class TfidfBinaryStanceDataset(Dataset):
def __init__(self, X,Y):
'''
Args:
X: (samples x Features)
Y: (samples). containing binary class eg. [1,0,1,1,....]
'''
super(TfidfBinaryStanceDataset, self).__init__()
self.x = torch.tensor(X).float()
self.y = torch.tensor(Y).long()
def __len__(self):
return len(self.x)
def __getitem__(self,idx):
return (self.x[idx] ,self.y[idx])
def get_dataloaders(x_train,y_train,x_test,y_test,bs=256,nfold=5):
'''
Args:
x_train: nd.array of shape (samples x features)
y_train: nd.array of shape (labels )
x_test: nd.array of shape (samples x features)
y_test: nd.array of shape (labels )
nfold: Scalar, number of total folds, It can't be greater than number of samples in each class
Returns:
loaders: Dict of shape (nfolds x 2), where the keys are fold ids and tuple containing train and test loader for
that split
'''
train_dataset = TfidfBinaryStanceDataset(x_train,y_train)
test_dataset = TfidfBinaryStanceDataset(x_test,y_test)
dataset = ConcatDataset([train_dataset,test_dataset]) #A big dataset
kfold = StratifiedKFold(n_splits=nfold, shuffle=False)
labels = [data[1] for data in dataset]
loaders = defaultdict(tuple)
for fold,(train_ids,test_ids) in enumerate(kfold.split(dataset,labels)):
train_subsampler = SubsetRandomSampler(train_ids)
test_subsampler = SubsetRandomSampler(test_ids)
train_loader = torch.utils.data.DataLoader(dataset,batch_size=bs, sampler=train_subsampler) #
test_loader = torch.utils.data.DataLoader(dataset,batch_size=bs, sampler=test_subsampler)
loaders[fold] = (train_loader,test_loader)
return loaders
import torch
import torch.nn as nn
from torch.optim import Adam
import numpy as np
from collections import defaultdict
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
class LogisticRegression(nn.Module):
def __init__(self, input_dim, output_dim):
super(LogisticRegression,self).__init__()
self.linear = nn.Linear(input_dim,output_dim)
def forward(self,x):
out = self.linear(x)
return out
def eval_one_epoch(net,dataloader,optim,lfn,triplet_lfn,margin):
net.eval()
losses = []
f1m = []
for batch_id, (x,y) in enumerate(dataloader):
assert len(torch.unique(y)) != 1
x = x.to(device).float()
y = y.to(device).long()
    hs = net(x) #(samples x 2)
    #cross-entropy loss
ce_loss = lfn(hs,y)
#triplet-loss
#generate the triplet
probs = hs.softmax(dim=1) #(samples x 2)
y_hat = probs.argmax(dim=1)
anchors,positives, negatives = generate_triplets(hs,y_hat,y) #(misclassified_samples, d_model=2)
anchors,positives, negatives = mine_hard_triplets(anchors,positives,negatives,margin)
triplet_loss = triplet_lfn(anchors,positives,negatives)
#total-loss
loss = (ce_loss + triplet_loss)/2
losses += [loss.item()]
target_names = ['unrelated','related']
f1m += [classification_report(y_hat.detach().cpu().numpy(),
y.detach().cpu().numpy(), target_names=target_names,output_dict=1)['macro avg']['f1-score']]
return np.mean(losses), np.mean(f1m)
def train_one_epoch(net,dataloader,optim,lfn,triplet_lfn,margin):
net.train()
losses = []
for batch_id, (x_train,y_train) in enumerate(dataloader):
x_train = x_train.to(device).float()
y_train = y_train.to(device).long()
    hs = net(x_train) #(samples x 2)
    #cross-entropy loss
ce_loss = lfn(hs,y_train)
#triplet-loss
#generate the triplet
probs = hs.softmax(dim=1) #(samples x 2)
y_hat = probs.argmax(dim=1)
anchors,positives, negatives = generate_triplets(hs,y_hat,y_train) #(misclassified_samples, d_model=2)
anchors,positives, negatives = mine_hard_triplets(anchors,positives,negatives,margin)
triplet_loss = triplet_lfn(anchors,positives,negatives)
#total-loss
loss = (ce_loss + triplet_loss)/2
loss.backward()
optim.step()
optim.zero_grad()
losses += [loss.item()]
return sum(losses)/len(losses)
def mine_hard_triplets(anchors,positives,negatives,margin):
'''
Args:
anchor: Tensor of shape (missclassified_samples x 2 )
positive: Tensor of shape (missclassified_smaples_positive x 2)
negative: Tensor of shape (missclassified_smaples_negative x 2)
Returns:
anchor: Tensor of shape (hard_missclassified_samples x 2 )
positive: Tensor of shape (hard_missclassified_smaples_positive x 2)
negative: Tensor of shape (hard_missclassified_smaples_negative x 2)
'''
#mine-semihar triplets
l2_dist = nn.PairwiseDistance()
d_p = l2_dist(anchors, positives)
d_n = l2_dist(anchors, negatives)
hard_triplets = torch.where((d_n - d_p < margin))[0]
anchors = anchors[hard_triplets]
positives = positives[hard_triplets]
negatives = negatives[hard_triplets]
return anchors,positives,negatives
def generate_triplets(hs,y_hat,y):
'''
Args:
hs: (Samples x 2)
y_hat: Tensor of shape (samples,), Containing predicted label eg. [1,0,1,1,1,1]
y: Tensor of shape (samples,), Containing GT label eg. [1,0,1,1,1,1]
Returns:
anchor: Tensor of shape (missclassified_samples x 2 )
positive: Tensor of shape (missclassified_smaples_positive x 2)
negative: Tensor of shape (missclassified_smaples_negative x 2)
'''
mismatch_indices = torch.where(y_hat != y)[0]
anchors = hs[mismatch_indices] #(miscalssfied_samples x 2)
positives = get_positives(hs,mismatch_indices,y) #(miscalssfied_samples x 2)
negatives = get_negatives(hs,mismatch_indices,y)
return anchors,positives, negatives
def get_positives(hs,misclassified_indicies,y):
'''
For each misclassfied sample we, randomly pick 1 positive anchor
Args:
hs: (Samples x 2)
mismatch_indices: A tensor of shape [misclassified], containing row indices relatie to hs
y: Tensor of shape (samples,), Containing GT label eg. [1,0,1,1,1,1]
Returns:
positive: Tensor of shape [misclassified x 2]
'''
positives_indices = []
negative_indices = []
for anchor_index in misclassified_indicies:
anchor_class = y[anchor_index]
possible_positives = torch.where(y == anchor_class)[0]
positive_index = anchor_index
while anchor_index == positive_index:
positive_index = np.random.choice(possible_positives.detach().cpu().numpy())
positives_indices += [positive_index]
positives = hs[positives_indices]
return positives
def get_negatives(hs,misclassified_indicies,y):
'''
For each misclassfied sample we, randomly pick 1 negative anchor
Args:
hs: (Samples x 2)
mismatch_indices: A tensor of shape [misclassified], containing row indices relatie to hs
y: Tensor of shape (samples,), Containing GT label eg. [1,0,1,1,1,1]
Returns:
positive: Tensor of shape [misclassified x 2]
'''
negative_indices = []
for anchor_index in misclassified_indicies:
anchor_class = y[anchor_index]
possible_negatives = torch.where(y != anchor_class)[0]
    negative_index = np.random.choice(possible_negatives.detach().cpu().numpy())  # assumes both classes appear in the batch
negative_indices += [negative_index]
negatives = hs[negative_indices]
return negatives
def save_model(net,macro_fs,fs):
if fs>=max(macro_fs):
torch.save(net,'./net.pth')
#TODO: Find the class wegihts
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
epoch = 20
margin= 0.5
lr = 4.33E-02
bs = 1024
nfolds = 5
enc = LabelEncoder()
x_train = X_train_tfidf
y_train =enc.fit_transform(Y_train)
x_test = X_test_tfidf
y_test = enc.fit_transform(Y_test)
def train():
class_weights = torch.tensor([1.75,1]).to(device)
lfn = nn.CrossEntropyLoss(weight=class_weights).to(device)
triplet_lfn = nn.TripletMarginLoss(margin=margin).to(device)
loaders = get_dataloaders(x_train,y_train,x_test,y_test,bs=bs, nfold=nfolds) #dict of shape (nfold x 2),2 because it consist of train_loader and test_loader
macro_f1m = []
for fold in range(nfolds):
fold_macro_f1m =[]
print(f'Starting training for fold:{fold}')
net = LogisticRegression(input_dim= x_train.shape[1],
output_dim= 2).to(device)
optim = Adam(net.parameters(), lr=lr)
for e in range(epoch):
train_loss = train_one_epoch(net,
loaders[fold][0],
optim,
lfn,
triplet_lfn,
margin)
eval_loss,f1m = eval_one_epoch(net,
loaders[fold][1],
optim,
lfn,
triplet_lfn,
margin)
macro_f1m += [f1m]
fold_macro_f1m += [f1m]
save_model(net,macro_f1m,f1m)
if (e+1)%5==0:
print(f'nfold:{fold},epoch:{e},train loss:{train_loss}, eval loss:{eval_loss}, fm1:{f1m}')
print(f'Fold:{fold}, Average F1-Macro:{np.mean(fold_macro_f1m)}')
print('=======================================')
print(f'{nfolds}-Folds Average F1-Macro:{np.mean(macro_f1m)}')
return np.mean(macro_f1m)
#Use Cyclical Learning Rates for Training Neural Networks to roughly estimate good lr
#!pip install torch_lr_finder
from torch_lr_finder import LRFinder
loaders = get_dataloaders(x_train,y_train,x_test,y_test,bs=256, nfold=nfolds)
train_loader = loaders[0][0]
model = LogisticRegression(160,2)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=1e-7, weight_decay=1e-2)
lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
lr_finder.range_test(train_loader, end_lr=100, num_iter=100)
lr_finder.plot() # to inspect the loss-learning rate graph
lr_finder.reset() # to reset the model and optimizer to their initial state
train()
```
#test
```
#net = torch.load('./net.pth')
net.eval()
x_test = torch.from_numpy(X_test_tfidf).to(device).float()
probs = net(x_test)
prediction = probs.argmax(dim=1).detach().cpu().numpy()
# #print the classification report to highlight the accuracy with f1-score, precision and recall
prediction = ['unrelated' if p else 'related' for p in prediction ]
print(metrics.classification_report(prediction, Y_test))
plot_confussion_matrix(prediction, Y_test)
plot_roc_curve(prediction, Y_test)
#test for get_positives
test_hs = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.8705, 0.1356],
[0.9723, 0.1930],
[0.7416, 0.4498]])
test_mi = torch.tensor([0,1,2])
y = torch.tensor([0,0,1,1,1])
out = get_positives(test_hs,test_mi, y)
assert out.shape == (3,2)
#test for get_negatives
test_hs = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.8705, 0.1356],
[0.9723, 0.1930],
[0.7416, 0.4498]])
test_mi = torch.tensor([0,1,2])
y = torch.tensor([0,0,1,1,1])
out = get_negatives(test_hs,test_mi, y)
assert out.shape == (3,2)
#test for generate_triplets
test_hs = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.8705, 0.1356],
[0.9723, 0.1930],
[0.7416, 0.4498]])
y_hat = torch.tensor([1,1,1,1,1]) #
y = torch.tensor([1,1,1,0,0])
a,p,n = generate_triplets(test_hs,y_hat,y)
assert a.shape == (2,2)
assert p.shape == (2,2)
assert n.shape == (2,2)
#test for mine_hard_triplets
a = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.7416, 0.4498]])
p = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.7416, 0.4498]])
n = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.7416, 0.4498]])
h_a , h_p ,h_n= mine_hard_triplets(a,p,n,0.5)
assert h_a.shape == (3,2)
assert h_p.shape == (3,2)
assert h_n.shape == (3,2)
x_train = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.7416, 0.4498]])
y_train = [1,1,0]
x_test = torch.tensor([[0.8799, 0.0234],
[0.2341, 0.8839],
[0.7416, 0.4498]])
y_test = [1,1,0]
loader = get_dataloaders(x_train,y_train,x_test,y_test,bs=1,nfold=2)
assert len(loader) == 2
for k,(train_loader,test_loader) in loader.items():
  print(f'fold {k}')
  for x,y in train_loader:
    print(x.shape)
    print(y.shape)
train_loader_0, test_loader_0 = loader[0]
```
| github_jupyter |
<a href="https://colab.research.google.com/github/tuanavu/deep-learning-tutorials/blob/development/colab-example-notebooks/colab_github_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Using Google Colab with GitHub
[Google Colaboratory](http://colab.research.google.com) is designed to integrate cleanly with GitHub, allowing both loading notebooks from github and saving notebooks to github.
## Loading Public Notebooks Directly from GitHub
Colab can load public github notebooks directly, with no required authorization step.
For example, consider the notebook at this address: https://github.com/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb.
The direct colab link to this notebook is: https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb.
To generate such links in one click, you can use the [Open in Colab](https://chrome.google.com/webstore/detail/open-in-colab/iogfkhleblhcpcekbiedikdehleodpjo) Chrome extension.
## Browsing GitHub Repositories from Colab
Colab also supports special URLs that link directly to a GitHub browser for any user/organization, repository, or branch. For example:
- http://colab.research.google.com/github will give you a general github browser, where you can search for any github organization or username.
- http://colab.research.google.com/github/googlecolab/ will open the repository browser for the ``googlecolab`` organization. Replace ``googlecolab`` with any other github org or user to see their repositories.
- http://colab.research.google.com/github/googlecolab/colabtools/ will let you browse the main branch of the ``colabtools`` repository within the ``googlecolab`` organization. Substitute any user/org and repository to see its contents.
- http://colab.research.google.com/github/googlecolab/colabtools/blob/master will let you browse ``master`` branch of the ``colabtools`` repository within the ``googlecolab`` organization. (don't forget the ``blob`` here!) You can specify any valid branch for any valid repository.
## Loading Private Notebooks
Loading a notebook from a private GitHub repository is possible, but requires an additional step to allow Colab to access your files.
Do the following:
1. Navigate to http://colab.research.google.com/github.
2. Click the "Include Private Repos" checkbox.
3. In the popup window, sign-in to your Github account and authorize Colab to read the private files.
4. Your private repositories and notebooks will now be available via the github navigation pane.
## Saving Notebooks To GitHub or Drive
Any time you open a GitHub hosted notebook in Colab, it opens a new editable view of the notebook. You can run and modify the notebook without worrying about overwriting the source.
If you would like to save your changes from within Colab, you can use the File menu to save the modified notebook either to Google Drive or back to GitHub. Choose **File→Save a copy in Drive** or **File→Save a copy to GitHub** and follow the resulting prompts. To save a Colab notebook to GitHub requires giving Colab permission to push the commit to your repository.
## Open In Colab Badge
Anybody can open a copy of any github-hosted notebook within Colab. To make it easier to give people access to live views of GitHub-hosted notebooks,
colab provides a [shields.io](http://shields.io/)-style badge, which appears as follows:
[](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
The markdown for the above badge is the following:
```markdown
[](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
```
The HTML equivalent is:
```HTML
<a href="https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
```
Remember to replace the notebook URL in this template with the notebook you want to link to.
```
```
| github_jupyter |
```
# Parameters
# Build the dataset
from typing import Optional
import pandas as pd
import functools
def add_parent_level(df: pd.DataFrame, name: str) -> None:
df.columns = pd.MultiIndex.from_tuples([(name, x) for x in df.columns])
def calculate_limit(row: pd.Series, attribute: str) -> Optional[float]:
row_analysis = local_analysis.get(row.name)
if row_analysis is None:
return None
vm_spec = compute_specs.virtual_machine_by_name(row_analysis.advisor_sku)
return getattr(vm_spec.capabilities, attribute)
def add_limit(df: pd.DataFrame, name: str) -> None:
df['new_limit'] = df.apply(functools.partial(calculate_limit, attribute=name), axis=1)
drop_utilization = ['samples', 'percentile_50th', 'percentile_80th']
drop_disk_utilization = ['cached', 'counter_name']
res_data = resources.assign(resource_name=resources.resource_id.str.extract(r'([^/]+)$'))
res_data = res_data.drop(columns=['subscription_id', 'storage_profile'])
res_data = res_data.set_index('resource_id')
res_data_col = res_data.columns.to_list()
res_data_col = res_data_col[1:-1] + res_data_col[-1:] + res_data_col[0:1]
res_data = res_data[res_data_col]
add_parent_level(res_data, 'Resource')
if local_analysis:
local_data = pd.DataFrame([(k, v.advisor_sku, v.advisor_sku_invalid_reason, v.annual_savings_no_ri) for k,v in local_analysis.items()], columns=['resource_id', 'recommendation', 'invalidation', 'annual_savings']).convert_dtypes()
local_data = local_data.set_index('resource_id')
add_parent_level(local_data, 'AzMeta')
if advisor_analysis:
advisor_data = pd.DataFrame([(k, v.advisor_sku, v.advisor_sku_invalid_reason) for k,v in advisor_analysis.items()], dtype='string', columns=['resource_id', 'recommendation', 'invalidation'])
advisor_data = advisor_data.set_index('resource_id')
add_parent_level(advisor_data, 'Advisor')
cpu_data = cpu_utilization.drop(columns=drop_utilization).set_index('resource_id')
add_limit(cpu_data, 'd_total_acus')
add_parent_level(cpu_data, 'CPU Used (ACUs)')
mem_data = mem_utilization.drop(columns=drop_utilization).set_index('resource_id')
mem_data = mem_data / 1024.0
add_limit(mem_data, 'memory_gb')
add_parent_level(mem_data, 'Memory Used (GiB)')
disk_tput_cached = disk_utilization[(disk_utilization.cached == True) & (disk_utilization.counter_name == 'Disk Bytes/sec')]
disk_tput_cached = disk_tput_cached.drop(columns=drop_utilization + drop_disk_utilization).set_index('resource_id')
add_limit(disk_tput_cached, 'combined_temp_disk_and_cached_read_bytes_per_second')
disk_tput_cached = disk_tput_cached / (1024.0 ** 2)
add_parent_level(disk_tput_cached, 'Cached Disk Throughput (MiB/sec)')
disk_trans_cached = disk_utilization[(disk_utilization.cached == True) & (disk_utilization.counter_name == 'Disk Transfers/sec')]
disk_trans_cached = disk_trans_cached.drop(columns=drop_utilization + drop_disk_utilization).set_index('resource_id')
add_limit(disk_trans_cached, 'combined_temp_disk_and_cached_iops')
add_parent_level(disk_trans_cached, 'Cached Disk Operations (IOPS)')
disk_tput_uncached = disk_utilization[(disk_utilization.cached == False) & (disk_utilization.counter_name == 'Disk Bytes/sec')]
disk_tput_uncached = disk_tput_uncached.drop(columns=drop_utilization + drop_disk_utilization).set_index('resource_id')
add_limit(disk_tput_uncached, 'uncached_disk_bytes_per_second')
disk_tput_uncached = disk_tput_uncached / (1024.0 ** 2)
add_parent_level(disk_tput_uncached, 'Uncached Disk Throughput (MiB/sec)')
disk_trans_uncached = disk_utilization[(disk_utilization.cached == False) & (disk_utilization.counter_name == 'Disk Transfers/sec')]
disk_trans_uncached = disk_trans_uncached.drop(columns=drop_utilization + drop_disk_utilization).set_index('resource_id')
add_limit(disk_trans_uncached, 'uncached_disk_iops')
add_parent_level(disk_trans_uncached, 'Uncached Disk Operations (IOPS)')
all_joins = [cpu_data, mem_data, disk_tput_cached, disk_trans_cached, disk_tput_uncached, disk_trans_uncached]
if local_analysis:
all_joins.insert(0, local_data)
if advisor_analysis:
all_joins.append(advisor_data)
full_data = res_data.join(all_joins)
full_data.sort_index(inplace=True)
full_data.to_excel('final_out_test.xlsx')
```
# AzMeta Resize Recommendations
```
import datetime
print("Report Date:", datetime.datetime.now().isoformat())
print("Total Annual Savings:", "${:,.2f}".format(local_data[('AzMeta', 'annual_savings')].sum()), "(Non-RI Pricing, SQL and Windows AHUB Licensing)")
# Present the dataset
import matplotlib.pyplot as plt
import itertools
from matplotlib import colors
def background_limit_coloring(row):
cmap="coolwarm"
text_color_threshold=0.408
limit_index = (row.index.get_level_values(0)[0], 'new_limit')
smin = 0
smax = row[limit_index]
if pd.isna(smax):
return [''] * len(row)
rng = smax - smin
norm = colors.Normalize(smin, smax)
rgbas = plt.cm.get_cmap(cmap)(norm(row.to_numpy(dtype=float)))
def relative_luminance(rgba):
r, g, b = (
            x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = "#f1f1f1" if dark else "#000000"
return f"background-color: {colors.rgb2hex(rgba)};color: {text_color};"
return [css(rgba) for rgba in rgbas[0:-1]] + ['']
def build_header_style(col_groups):
start = 0
styles = []
palette = ['#f6f6f6', '#eae9e9', '#d4d7dd', '#f6f6f6', '#eae9e9', '#d4d7dd', '#f6f6f6', '#eae9e9', '#d4d7dd']
for i,group in enumerate(itertools.groupby(col_groups, lambda c:c[0])):
styles.append({'selector': f'.col_heading.level0.col{start}', 'props': [('background-color', palette[i])]})
group_len = len(tuple(group[1]))
for j in range(group_len):
styles.append({'selector': f'.col_heading.level1.col{start + j}', 'props': [('background-color', palette[i])]})
start += group_len
return styles
data_group_names = [x for x in full_data.columns.get_level_values(0).unique() if x not in ('Resource', 'AzMeta', 'Advisor')]
num_mask = [x[0] in data_group_names for x in full_data.columns.to_flat_index()]
styler = full_data.style.hide_index() \
.set_properties(**{'font-weight': 'bold'}, subset=[('Resource', 'resource_name')]) \
.format('{:.1f}', subset=num_mask, na_rep='N/A') \
.format('${:.2f}', subset=[('AzMeta', 'annual_savings')], na_rep='N/A') \
.set_table_styles(build_header_style(full_data.columns))
for data_group in data_group_names:
mask = [x == data_group for x in full_data.columns.get_level_values(0)]
styler = styler.apply(background_limit_coloring, axis=1, subset=mask)
styler
```
| github_jupyter |
# Cowell's formulation
For cases where we only study the gravitational forces, solving the Kepler's equation is enough to propagate the orbit forward in time. However, when we want to take perturbations that deviate from Keplerian forces into account, we need a more complex method to solve our initial value problem: one of them is **Cowell's formulation**.
In this formulation we write the two body differential equation separating the Keplerian and the perturbation accelerations:
$$\ddot{\mathbf{r}} = -\frac{\mu}{|\mathbf{r}|^3} \mathbf{r} + \mathbf{a}_d$$
<div class="alert alert-info">For an in-depth exploration of this topic, still to be integrated in poliastro, check out https://github.com/Juanlu001/pfc-uc3m</div>
<div class="alert alert-info">An earlier version of this notebook allowed for more flexibility and interactivity, but was considerably more complex. Future versions of poliastro and plotly might bring back part of that functionality, depending on user feedback. You can still download the older version <a href="https://github.com/poliastro/poliastro/blob/0.8.x/docs/source/examples/Propagation%20using%20Cowell's%20formulation.ipynb">here</a>.</div>
## First example
Let's setup a very simple example with constant acceleration to visualize the effects on the orbit.
```
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
plt.ion()
from poliastro.bodies import Earth
from poliastro.twobody import Orbit
from poliastro.examples import iss
from poliastro.twobody.propagation import cowell
from poliastro.plotting import OrbitPlotter3D
from poliastro.util import norm
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
```
To provide an acceleration depending on an extra parameter, we can use **closures** like this one:
```
accel = 2e-5
def constant_accel_factory(accel):
def constant_accel(t0, u, k):
v = u[3:]
norm_v = (v[0]**2 + v[1]**2 + v[2]**2)**.5
return accel * v / norm_v
return constant_accel
def custom_propagator(orbit, tof, rtol, accel=accel):
# Workaround for https://github.com/poliastro/poliastro/issues/328
if tof == 0:
return orbit.r.to(u.km).value, orbit.v.to(u.km / u.s).value
else:
# Use our custom perturbation acceleration
return cowell(orbit, tof, rtol, ad=constant_accel_factory(accel))
times = np.linspace(0, 10 * iss.period, 500)
times
times, positions = iss.sample(times, method=custom_propagator)
```
And we plot the results:
```
frame = OrbitPlotter3D()
frame.set_attractor(Earth)
frame.plot_trajectory(positions, label="ISS")
frame.show()
```
## Error checking
```
def state_to_vector(ss):
r, v = ss.rv()
x, y, z = r.to(u.km).value
vx, vy, vz = v.to(u.km / u.s).value
return np.array([x, y, z, vx, vy, vz])
k = Earth.k.to(u.km**3 / u.s**2).value
rtol = 1e-13
full_periods = 2
u0 = state_to_vector(iss)
tf = ((2 * full_periods + 1) * iss.period / 2).to(u.s).value
u0, tf
iss_f_kep = iss.propagate(tf * u.s, rtol=1e-18)
r, v = cowell(iss, tf, rtol=rtol)
iss_f_num = Orbit.from_vectors(Earth, r * u.km, v * u.km / u.s, iss.epoch + tf * u.s)
iss_f_num.r, iss_f_kep.r
assert np.allclose(iss_f_num.r, iss_f_kep.r, rtol=rtol, atol=1e-08 * u.km)
assert np.allclose(iss_f_num.v, iss_f_kep.v, rtol=rtol, atol=1e-08 * u.km / u.s)
assert np.allclose(iss_f_num.a, iss_f_kep.a, rtol=rtol, atol=1e-08 * u.km)
assert np.allclose(iss_f_num.ecc, iss_f_kep.ecc, rtol=rtol)
assert np.allclose(iss_f_num.inc, iss_f_kep.inc, rtol=rtol, atol=1e-08 * u.rad)
assert np.allclose(iss_f_num.raan, iss_f_kep.raan, rtol=rtol, atol=1e-08 * u.rad)
assert np.allclose(iss_f_num.argp, iss_f_kep.argp, rtol=rtol, atol=1e-08 * u.rad)
assert np.allclose(iss_f_num.nu, iss_f_kep.nu, rtol=rtol, atol=1e-08 * u.rad)
```
## Numerical validation
According to [Edelbaum, 1961], a coplanar semimajor-axis change with tangential thrust is defined by:
$$\frac{\operatorname{d}\!a}{a_0} = 2 \frac{F}{m V_0}\operatorname{d}\!t, \qquad \frac{\Delta{V}}{V_0} = \frac{1}{2} \frac{\Delta{a}}{a_0}$$
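Integrating the first relation for a constant tangential acceleration $a_d = F/m$ over a time of flight $t$ (valid while $\Delta a \ll a_0$) gives the prediction we check numerically below:
$$\frac{\Delta a}{a_0} \approx \frac{2\, a_d\, t}{V_0}$$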
So let's create a new circular orbit and perform the necessary checks, assuming constant mass and thrust (i.e. constant acceleration):
```
ss = Orbit.circular(Earth, 500 * u.km)
tof = 20 * ss.period
ad = constant_accel_factory(1e-7)
r, v = cowell(ss, tof.to(u.s).value, ad=ad)
ss_final = Orbit.from_vectors(Earth, r * u.km, v * u.km / u.s, ss.epoch + tof)
da_a0 = (ss_final.a - ss.a) / ss.a
da_a0
dv_v0 = abs(norm(ss_final.v) - norm(ss.v)) / norm(ss.v)
2 * dv_v0
np.allclose(da_a0, 2 * dv_v0, rtol=1e-2)
```
This means **we successfully validated the model against an extremely simple orbit transfer with an approximate analytical solution**. Notice that the final eccentricity, as originally noted by Edelbaum, is nonzero:
```
ss_final.ecc
```
## References
* [Edelbaum, 1961] "Propulsion requirements for controllable satellites"
| github_jupyter |
```
import pandas as pd
import numpy as np
import math
import keras
import tensorflow as tf
import progressbar
import os
from os import listdir
```
## Print Dependencies
Recording dependencies is fundamental for documenting the computational environment.
```
%load_ext watermark
# python, ipython, packages, and machine characteristics
%watermark -v -m -p pandas,keras,numpy,math,tensorflow,matplotlib,h5py
# date
print (" ")
%watermark -u -n -t -z
```
## Loading the data
```
from process import loaddata
class_data0 = loaddata("../data/{}.csv".format('low_ene'))
class_data0 = class_data0[class_data0[:,0] > 0.001]
class_data0.shape
y0 = class_data0[:,0]
A0 = class_data0
A0[:,9] = A0[:,13]
x0 = class_data0[:,1:10]
```
## Check to see if the data are balanced now
```
from matplotlib import pyplot
y0 = np.array(y0)
bins = np.linspace(0, 0.55, 50)
n, edges, _ = pyplot.hist(y0, bins, color = 'indianred', alpha=0.5, label='Osiris')
#pyplot.hist(y_pred, bins, color = 'mediumslateblue', alpha=0.5, label='NN')
pyplot.legend(loc='upper right')
pyplot.xlabel('Probability')
pyplot.yscale('log')
pyplot.title(r'Trained on ($p_e$, $p_{\gamma}$, $\omega_e$, $\omega_{\gamma}$, n)')
pyplot.show()
def balance_data(class_data, nbins):
from matplotlib import pyplot as plt
y = class_data[:,0]
n, edges, _ = plt.hist(y, nbins, color = 'indianred', alpha=0.5, label='Osiris')
n_max = n.max()
data = []
for class_ in class_data:
for i in range(len(n)):
edges_min = edges[i]
edges_max = edges[i+1]
if class_[0] > edges_min and class_[0] < edges_max:
for j in range(int(n_max/n[i])):
data.append(class_)
break
return np.array(data)
class_data = balance_data(class_data0, 100)
np.random.shuffle(class_data)
y = class_data[:,0]
A = class_data
print(A[0])
A[:,9] = A[:,13]
print(A[0])
x = class_data[:,1:10]
print(x[0])
print(x.shape)
from matplotlib import pyplot
y0 = np.array(y0)
bins = np.linspace(0, 0.55, 100)
pyplot.hist(y, bins, color = 'indianred', alpha=0.5, label='Osiris')
#pyplot.hist(y_pred, bins, color = 'mediumslateblue', alpha=0.5, label='NN')
pyplot.legend(loc='upper right')
pyplot.xlabel('Probability')
pyplot.yscale('log')
pyplot.title(r'Trained on ($p_e$, $p_{\gamma}$, $\omega_e$, $\omega_{\gamma}$, n)')
pyplot.show()
train_split = 0.75
train_limit = int(len(y)*train_split)
print("Training sample: {0} \nValuation sample: {1}".format(train_limit, len(y)-train_limit))
x_train = x[:train_limit]
x_val = x[train_limit:]
y_train = y[:train_limit]
y_val = y[train_limit:]
```
## Model Build
```
from keras.models import Sequential
from keras.layers.core import Dense
import keras.backend as K
from keras import optimizers
from keras import models
from keras import layers
from keras.layers.normalization import BatchNormalization
def build_model() :
model = models.Sequential()
model.add (BatchNormalization(input_dim = 9))
model.add (layers.Dense (12 , activation = "sigmoid"))
model.add (layers.Dense (9 , activation = "relu"))
model.add (layers.Dense (1 , activation = "sigmoid"))
model.compile(optimizer = "adam" , loss = 'mae' , metrics = ["mape"])
return model
model = build_model ()
history = model.fit ( x_train, y_train, epochs = 1000, batch_size = 10000 , validation_data = (x_val, y_val) )
model.save("../models/classifier/{}_noposition2.h5".format('probability'))
model.summary()
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
accuracy = history.history['mape']
val_accuracy = history.history['val_mape']
epochs = range(1, len(loss) + 1)
fig, ax1 = plt.subplots()
l1 = ax1.plot(epochs, loss, 'bo', label='Training loss')
vl1 = ax1.plot(epochs, val_loss, 'b', label='Validation loss')
ax1.set_title('Training and validation loss')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss (MAE)')
ax2 = ax1.twinx()
ac2= ax2.plot(epochs, accuracy, 'o', c="red", label='Training acc')
vac2= ax2.plot(epochs, val_accuracy, 'r', label='Validation acc')
ax2.set_ylabel('mape')
lns = l1 + vl1 + ac2 + vac2
labs = [l.get_label() for l in lns]
ax2.legend(lns, labs, loc="center right")
fig.tight_layout()
#fig.savefig("acc+loss_drop.pdf")
fig.show()
```
## Probability density distribution
```
y0 = class_data0[:,0]
A0 = class_data0
A0[:,9] = A0[:,13]
x0 = class_data0[:,1:10]
y_pred = model.predict(x0)
y_pred
from matplotlib import pyplot
y = np.array(y)
bins = np.linspace(0, 0.8, 100)
pyplot.hist(y0, bins, color = 'indianred', alpha=0.5, label='Osiris')
pyplot.hist(y_pred, bins, color = 'mediumslateblue', alpha=0.5, label='NN')
pyplot.legend(loc='upper right')
pyplot.xlabel('Probability')
pyplot.yscale('log')
pyplot.title(r'Trained on ($p_e$, $p_{\gamma}$, $\omega_e$, $\omega_{\gamma}$, n)')
pyplot.show()
```
| github_jupyter |
## Recreation of Terry's Notebook with NgSpice
In this experiment we are going to recreate Terry's notebook with an NgSpice simulation backend.
## Step 1: Set up Python3 and NgSpice
```
%matplotlib inline
import matplotlib.pyplot as plt
# check if ngspice can be found from python
from ctypes.util import find_library
ngspice_lib_filename = find_library('libngspice')
print(ngspice_lib_filename) ## if the result is none, make sure that libngspice is installed
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
from PySpice.Spice.NgSpice.Shared import NgSpiceShared
ngspice = NgSpiceShared.new_instance()
print(ngspice.exec_command('version -f'))
import nengo
import numpy as np
```
## Step 2: Define a single neuron
Let's start with the subcircuit of a single neuron. We are going to use the voltage-amplifier leaky integrate-and-fire neuron discussed in Section 3.3 of Indiveri et al. (May 2011).
```
neuron_model = '''
.subckt my_neuron Vmem out cvar=100p vsupply=1.8 vtau=0.4 vthr=0.2 vb=1
V1 Vdd 0 {vsupply}
V6 Vtau 0 {vtau}
V2 Vthr 0 {vthr}
V3 Vb1 0 {vb}
C1 Vmem 0 {cvar}
M5 N001 N001 Vdd Vdd pmos l=0.5 w=1.2 ad=1.2 as=1.2 pd=4.4 ps=4.4
M6 N002 N001 Vdd Vdd pmos l=0.5 w=1.2 ad=1.2 as=1.2 pd=4.4 ps=4.4
M8 N001 Vmem N004 N004 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
M9 N002 Vthr N004 N004 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
M10 N004 Vb1 0 0 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
Mreset Vmem out 0 0 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
M7 N003 N002 0 0 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
M18 out N003 0 0 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
M19 N003 N002 Vdd Vdd pmos l=0.5 w=1.2 ad=1.2 as=1.2 pd=4.4 ps=4.4
M20 out N003 Vdd Vdd pmos l=0.5 w=1.2 ad=1.2 as=1.2 pd=4.4 ps=4.4
Mleak Vmem Vtau 0 0 nmos l=0.5 w=0.6 ad=0.6 as=0.6 pd=3.2 ps=3.2
.ends my_neuron
'''
```
Create the neuron's netlist
```
def create_neuron_netlist(N):
# N is the number of neurons
netlist = ''
for i in range(N):
netlist += 'x'+str(i)+' Vmem'+str(i)+' out'+str(i)+' my_neuron vsupply={vsource} cvar=150p vthr=0.25 \n'
netlist += 'Rload'+str(i)+' out'+str(i)+ ' 0 100k\n'
return netlist
netlist_neurons = create_neuron_netlist(1)
```
## Step 3: Generate the input
Now, let's generate some input and see what it does. We are going to use the WhiteSignal that Terry used; however, we are going to shrink the signal in amplitude (since this will be a current signal in the circuit) and also increase its frequency.
```
stim = nengo.processes.WhiteSignal(period=10, high=5, seed=1).run(1, dt=0.001)
input_signal = [[i*1e-6, J[0]*10e-6] for i, J in enumerate(stim)] #scaling
```
Let's convert this signal to a current source.
```
def pwl_conv(signal):
    # signal should be a list of lists where each sublist has the form [time_value, current_value]
pwl_string = ''
for i in signal:
pwl_string += str(i[0]) + ' ' + str(i[1]) + ' '
return pwl_string
```
## Step 4: Generate remaining parts of the Spice Netlist
```
netlist_input = 'Iin0 Vdd Vmem0 PWL(' + pwl_conv(input_signal) +')\n' # Converting the input to a current source
## other setup parameters
args= {}
args['simulation_time'] = '1m'
args['simulation_step'] = '1u'
args['simulation_lib'] = '180nm.lib'
netlist_top= '''*Sample SPICE file
.include {simulation_lib}
.option scale=1u
.OPTIONS ABSTOL=1N VNTOL=1M
.options savecurrents
.tran {simulation_step} {simulation_time} UIC
'''.format(**args)
netlist_bottom = '''
.end'''
## define the sources
netlist_source = '''
.param vsource = 1.8
Vdd Vdd 0 {vsource}
'''
netlist = netlist_top + netlist_source + neuron_model + netlist_input+ netlist_neurons+ netlist_bottom
```
## Step 5: Simulate the netlist
```
def simulate(circuit):
ngspice.load_circuit(circuit)
ngspice.run()
print('Plots:', ngspice.plot_names)
plot = ngspice.plot(simulation=None, plot_name=ngspice.last_plot)
return plot
out=simulate(netlist)
plt.plot(out['time']._data,out['@rload0[i]']._data, label='output_current')
plt.plot(out['time']._data,out['@iin0[current]']._data, label = 'input_current')
plt.legend()
```
Great! We have a system that exhibits some sort of nonlinearity. Now let's create a feedforward system with a bunch of neurons and see if the system can be used to approximate a function.
## Step 6: Function approximation with a feedforward network
```
N = 50 # how many neurons there are
E = np.random.normal(size=(N, 1))
B = np.random.normal(size=(N))*0.1
netlist_neurons = create_neuron_netlist(N)
```
*Now let's feed that same stimulus to all the neurons and see how they behave.*
```
def create_neuron_current_netlist(E,B,stim,N):
    # build one PWL current source per neuron from the encoders E, biases B and the stimulus
netlist_input='\n'
signal = np.zeros((len(stim), N))
for i, J in enumerate(stim):
Js = np.dot(E, J)
for k, JJ in enumerate(Js):
signal[i][k] = JJ+B[k]
for k in range(N):
input_signal = [[i*1e-6, J*10e-6] for i, J in enumerate(signal[:,k])]
netlist_input += 'Iin'+str(k)+' Vdd Vmem'+str(k)+' PWL(' + pwl_conv(input_signal) +')\n\n'
return netlist_input
netlist_inputs = create_neuron_current_netlist(E,B,stim,N)
netlist = netlist_top + netlist_source + neuron_model + netlist_inputs+ netlist_neurons+ netlist_bottom
out=simulate(netlist)
```
So it seems we have some output from the ensemble. Let's convert this output to get the A matrix.
```
def extract_A_matrix(result, N, stim):
    # interpolate each neuron's load-resistor current onto a common time grid,
    # then threshold at half of its maximum to get a binary activity matrix A
    t = np.linspace(min(result['time']._data), max(result['time']._data), len(stim))
    temp_time = result['time']._data
    interpolated_result = np.zeros((len(stim), N))
    A = np.zeros((len(stim), N))
    for j in range(N):
        temp_str = '@rload'+str(j)+'[i]'
        temp_out = result[temp_str]._data
        interpolated_result[:,j] = np.interp(t, temp_time, temp_out)
        A[:,j] = interpolated_result[:,j] > max(interpolated_result[:,j])/2
    return A
A_from_spice = extract_A_matrix(out, N, stim)
plt.figure(figsize=(12,6))
plt.imshow(A_from_spice.T, aspect='auto', cmap='gray_r')
plt.show()
```
Cool! This is similar to the A matrix we got from Terry's notebook. We can also calculate the D matrix from this output and approximate the y(t)=x(t) function.
```
target = stim
D_from_spice, info = nengo.solvers.LstsqL2()(A_from_spice, target)
plt.plot(A_from_spice.dot(D_from_spice), label='output')
plt.plot(target, lw=3, label='target')
plt.legend()
plt.show()
print('RMSE:', info['rmses'])
```
*With spiking neuron models, it's very common to have a low-pass filter (i.e. a synapse) after the spike. Let's see what our output looks like with a low-pass filter applied.*
```
filt = nengo.synapses.Lowpass(0.01) #need to implement synapses in circuit
plt.plot(filt.filt(A_from_spice.dot(D_from_spice)), label='output (filtered)')
plt.plot(target, lw=3, label='target')
plt.legend()
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
df = pd.read_excel("default of credit card clients.xls", header=1, index_col=0)
df
mindf = df.min(skipna = False)
mindf
maxdf = df.max(skipna = False)
maxdf
sns.countplot(x = 'default payment next month', data=df)
df.shape
df.iloc[:,:23]
from imblearn.combine import SMOTEENN
smote_enn = SMOTEENN(random_state=0)
X_resampled, y_resampled = smote_enn.fit_resample(df.iloc[:,:23], df['default payment next month'])
sns.barplot(x=[0,1],y=np.bincount(y_resampled))
X_resampled, y_resampled = smote_enn.fit_resample(df.iloc[:,:23], df['default payment next month'])
X_train1, X_test1, y_train1, y_test1 = train_test_split(X_resampled, y_resampled, test_size=0.33, random_state=43)
from imblearn.over_sampling import SMOTENC
sm = SMOTENC(random_state=42, categorical_features=[1,2,3,5,6,7,8,9,10])
X_res, y_res = sm.fit_resample(df.iloc[:,:23], df['default payment next month'])
sns.barplot(x=[0,1],y=np.bincount(y_res))
```
# Scale everything
```
scaler = StandardScaler()
X_scale = scaler.fit_transform(X_res)
X_scale_torch = torch.FloatTensor(X_scale)
y_scale_torch = torch.FloatTensor(y_res)
y_scale_torch
from skorch import NeuralNetBinaryClassifier
from classes import MyModule
from sklearn.base import BaseEstimator, TransformerMixin
class toTensor(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return torch.FloatTensor(X)
class MyModule(nn.Module):
def __init__(self, num_units=128, dropoutrate = 0.5):
super(MyModule, self).__init__()
self.dropoutrate = dropoutrate
self.layer1 = nn.Linear(23, num_units)
self.nonlin = nn.ReLU()
self.dropout1 = nn.Dropout(self.dropoutrate)
self.dropout2 = nn.Dropout(self.dropoutrate)
self.layer2 = nn.Linear(num_units, num_units)
self.output = nn.Linear(num_units,1)
self.batchnorm1 = nn.BatchNorm1d(128)
self.batchnorm2 = nn.BatchNorm1d(128)
def forward(self, X, **kwargs):
X = self.nonlin(self.layer1(X))
X = self.batchnorm1(X)
X = self.dropout1(X)
X = self.nonlin(self.layer2(X))
X = self.batchnorm2(X)
X = self.dropout2(X)
X = self.output(X)
return X
model = NeuralNetBinaryClassifier(
MyModule(dropoutrate = 0.2),
max_epochs=40,
lr=0.01,
batch_size=128,
# Shuffle training data on each epoch
iterator_train__shuffle=True,
)
model.fit(X_scale_torch, y_scale_torch)
val_loss = []
train_loss = []
epochs = range(1,41)
for i in range(40):
val_loss.append(model.history[i]['valid_loss'])
train_loss.append(model.history[i]['train_loss'])
dfloss = (pd.DataFrame({'epoch': epochs, 'val_loss': val_loss, 'train_loss': train_loss},
columns=['epoch', 'val_loss', 'train_loss']).set_index('epoch'))
sns.lineplot(data=dfloss)
from skorch.helper import SliceDataset
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
train_slice = SliceDataset(X_scale_torch)
y_slice = SliceDataset(y_scale_torch)
scores = cross_validate(model, X_scale_torch, y_scale_torch, scoring='accuracy', cv=4)
import functools as f
print('validation accuracy for each fold: {}'.format(scores))
#print('avg validation accuracy: {:.3f}'.format(scores.mean()))
#loop through the dictionary
for key,value in scores.items():
#use reduce to calculate the avg
print(f"Average {key}", f.reduce(lambda x, y: x + y, scores[key]) / len(scores[key]))
from sklearn.model_selection import GridSearchCV
params = {
'lr': [0.01, 0.001],
'module__dropoutrate': [0.2, 0.5]
}
model.module
gs = GridSearchCV(model, params, refit=False, cv=4, scoring='accuracy', verbose=2)
gs_results = gs.fit(X_scale_torch,y_scale_torch )
for key in gs.cv_results_.keys():
print(key, gs.cv_results_[key])
import pickle
with open('model1.pkl', 'wb') as f:
pickle.dump(model, f)
model.save_params(
f_params='model.pkl', f_optimizer='opt.pkl', f_history='history.json')
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin, BaseEstimator
from classes import toTensor
pipeline = Pipeline([
('scale', StandardScaler()),
('tensor',toTensor()),
('classification',model)
])
pipeline.fit(X_res, torch.FloatTensor(y_res))
import joblib
with open('model1.pkl', 'wb') as f:
joblib.dump(pipeline,f)
jinput = X_res.iloc[15].to_json()
jinput
{
"LIMIT_BAL": 20000,
"SEX": 2,
"EDUCATION": 2,
"MARRIAGE": 1,
"AGE": 24,
"PAY_0": 2,
"PAY_2": -1,
"PAY_3": -1,
"PAY_4": -1,
"PAY_5": -2,
"PAY_6": -2,
"BILL_AMT1": 3913,
"BILL_AMT2": 3102,
"BILL_AMT3": 689,
"BILL_AMT4": 0,
"BILL_AMT5": 0,
"BILL_AMT6": 0,
"PAY_AMT1": 0,
"PAY_AMT2": 689,
"PAY_AMT3": 0,
"PAY_AMT4": 0,
"PAY_AMT5": 0,
"PAY_AMT6": 0
}
import requests
bashCommand = f"""curl -X 'POST' 'http://127.0.0.1:8000/predict' -H 'accept: application/json' -H 'Content-Type: application/json' -d {jinput}"""
headers = {
}
res = requests.post('http://127.0.0.1:8000/predict', data=jinput, headers=headers)
res.text
%%timeit
res = requests.post('http://127.0.0.1:8000/predict', data=jinput, headers=headers)
```
# Embeds
```
df.columns
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
lincolumns = (['LIMIT_BAL','AGE', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6',
'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6'])
ct = ColumnTransformer([
('scalethis', StandardScaler(), lincolumns)
], remainder='passthrough')
ct2 = ct.fit_transform(df.iloc[:,:23])
dfct2 = pd.DataFrame(ct2)
dfct2
df_numeric = dfct2.iloc[:,:14]
df_cat = dfct2.iloc[:,14:]
df_cat1 = df_cat.iloc[:,0]
df_cat2 = df_cat.iloc[:,1]
df_cat3 = df_cat.iloc[:,2]
df_cat4 = df_cat.iloc[:,3:]
df_cat4
def emb_sz_rule(n_cat):
return min(600, round(1.6 * n_cat**0.56))
embed = nn.Embedding(2, emb_sz_rule(2))
embed(torch.tensor(df_cat1.values).to(torch.int64))
def emb_sz_rule(n_cat):
return min(600, round(1.6 * n_cat**0.56))
class MyModule(nn.Module):
    def __init__(self, num_inputs=20, num_units=128, nonlin=nn.ReLU()):
        super(MyModule, self).__init__()
        # num_inputs must match the width of the concatenated numeric features + embeddings
        self.dense0 = nn.Linear(num_inputs, num_units)
        self.nonlin = nonlin
        self.dropout = nn.Dropout(0.5)
        self.dense1 = nn.Linear(num_units, num_units)
        self.output = nn.Linear(num_units, 2)
        self.softmax = nn.Softmax(dim=-1)
self.embed1 = nn.Embedding(2, emb_sz_rule(2))
self.embed2 = nn.Embedding(7, emb_sz_rule(7))
self.embed3 = nn.Embedding(4, emb_sz_rule(4))
self.embed4 = nn.Embedding(11, emb_sz_rule(11))
def forward(self, X, cat1, cat2, cat3, cat4):
x1 = self.embed1(cat1)
x2 = self.embed2(cat2)
x3 = self.embed3(cat3)
x4 = self.embed4(cat4)
X = torch.cat((X,x1,x2,x3,x4), dim=1)
X = self.nonlin(self.dense0(X))
X = self.dropout(X)
X = self.nonlin(self.dense1(X))
X = self.softmax(self.output(X))
return X
model = NeuralNetBinaryClassifier(
MyModule,
max_epochs=40,
lr=0.001,
# Shuffle training data on each epoch
iterator_train__shuffle=True,
)
EPOCHS = 50
BATCH_SIZE = 64
LEARNING_RATE = 0.001
class BinaryClassification(nn.Module):
def __init__(self):
super(BinaryClassification, self).__init__()
self.layer_1 = nn.Linear(23, 64)
self.layer_2 = nn.Linear(64, 64)
self.layer_out = nn.Linear(64, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.1)
self.batchnorm1 = nn.BatchNorm1d(64)
self.batchnorm2 = nn.BatchNorm1d(64)
def forward(self, inputs):
x = self.relu(self.layer_1(inputs))
x = self.batchnorm1(x)
x = self.relu(self.layer_2(x))
x = self.batchnorm2(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
class MyModule(nn.Module):
    def __init__(self, num_inputs=23, num_units=128, nonlin=nn.ReLU()):
        super(MyModule, self).__init__()
        # 23 input features after scaling
        self.dense0 = nn.Linear(num_inputs, num_units)
        self.nonlin = nonlin
        self.dropout = nn.Dropout(0.5)
        self.dense1 = nn.Linear(num_units, num_units)
        self.output = nn.Linear(num_units, 2)
        self.softmax = nn.Softmax(dim=-1)
def forward(self, X, **kwargs):
X = self.nonlin(self.dense0(X))
X = self.dropout(X)
X = self.nonlin(self.dense1(X))
X = self.softmax(self.output(X))
return X
model = NeuralNetBinaryClassifier(
MyModule,
max_epochs=40,
lr=0.001,
# Shuffle training data on each epoch
iterator_train__shuffle=True,
)
```
# PyTorch
```
## train data
class TrainData(Dataset):
def __init__(self, X_data, y_data):
self.X_data = X_data
self.y_data = y_data
def __getitem__(self, index):
return self.X_data[index], self.y_data[index]
def __len__ (self):
return len(self.X_data)
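# NOTE (assumption): X_train / X_test / y_train / y_test are used below but were never
# defined earlier in this notebook; a plausible reconstruction is a simple split of the
# scaled, SMOTE-resampled data created above.
X_train, X_test, y_train, y_test = train_test_split(X_scale, y_res, test_size=0.33, random_state=42)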
train_data = TrainData(torch.FloatTensor(X_train), torch.FloatTensor(y_train.to_numpy(dtype=np.float64)))
## test data
class TestData(Dataset):
def __init__(self, X_data):
self.X_data = X_data
def __getitem__(self, index):
return self.X_data[index]
def __len__ (self):
return len(self.X_data)
test_data = TestData(torch.FloatTensor(X_test))
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=1)
class BinaryClassification(nn.Module):
def __init__(self):
super(BinaryClassification, self).__init__()
self.layer_1 = nn.Linear(23, 64)
self.layer_2 = nn.Linear(64, 64)
self.layer_out = nn.Linear(64, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.1)
self.batchnorm1 = nn.BatchNorm1d(64)
self.batchnorm2 = nn.BatchNorm1d(64)
def forward(self, inputs):
x = self.relu(self.layer_1(inputs))
x = self.batchnorm1(x)
x = self.relu(self.layer_2(x))
x = self.batchnorm2(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model = BinaryClassification()
model.to(device)
print(model)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
def binary_acc(y_pred, y_test):
y_pred_tag = torch.round(torch.sigmoid(y_pred))
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc = correct_results_sum/y_test.shape[0]
acc = torch.round(acc * 100)
return acc
model.train()
for e in range(1, EPOCHS+1):
epoch_loss = 0
epoch_acc = 0
for X_batch, y_batch in train_loader:
X_batch, y_batch = X_batch.to(device), y_batch.to(device)
optimizer.zero_grad()
y_pred = model(X_batch)
loss = criterion(y_pred, y_batch.unsqueeze(1))
acc = binary_acc(y_pred, y_batch.unsqueeze(1))
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
print(f'Epoch {e+0:03}: | Loss: {epoch_loss/len(train_loader):.5f} | Acc: {epoch_acc/len(train_loader):.3f}')
y_pred_list = []
model.eval()
with torch.no_grad():
for X_batch in test_loader:
X_batch = X_batch.to(device)
y_test_pred = model(X_batch)
y_test_pred = torch.sigmoid(y_test_pred)
y_pred_tag = torch.round(y_test_pred)
y_pred_list.append(y_pred_tag.cpu().numpy())
y_pred_list = [a.squeeze().tolist() for a in y_pred_list]
confusion_matrix(y_test, y_pred_list)
print(classification_report(y_test, y_pred_list))
# use the original data
scaler = StandardScaler()
X_og = scaler.fit_transform(df.iloc[:,:23])
X_og
og_data = TestData(torch.FloatTensor(X_og))
og_loader = DataLoader(dataset=og_data, batch_size=1)
og_y_pred_list = []
model.eval()
with torch.no_grad():
for X_batch in og_loader:
X_batch = X_batch.to(device)
y_test_pred = model(X_batch)
y_test_pred = torch.sigmoid(y_test_pred)
y_pred_tag = torch.round(y_test_pred)
og_y_pred_list.append(y_pred_tag.cpu().numpy())
og_y_pred_list = [a.squeeze().tolist() for a in og_y_pred_list]
confusion_matrix(df['default payment next month'].to_numpy(dtype=np.float64), og_y_pred_list)
print(classification_report(df['default payment next month'].to_numpy(dtype=np.float64), og_y_pred_list))
torch.save(model.state_dict(), "model1.pt")
# Reference: https://towardsdatascience.com/pytorch-tabular-binary-classification-a0368da5bb89
```
| github_jupyter |
# For Loops (2) - Looping through the items in a sequence
In the last lesson we introduced the concept of a For loop and learnt how we can use them to repeat a section of code. We learnt how to write a For loop that repeats a piece of code a specific number of times using the <code>range()</code> function, and saw that we have to create a variable to keep track of our position in the loop (conventionally called <code>i</code>). We also found out how to implement if-else statements within our loop to change which code is run inside the loop.
As well as writing a loop which runs a specific number of times, we can also create a loop which acts upon each item in a sequence. In this lesson we'll learn how to implement this functionality and find out how to use this knowledge to help us make charts with Plotly.
## Looping through each item in a sequence
Being able to access each item in turn in a sequence is a really useful ability and one which we'll use often in this course. The syntax is very similar to that which we use to loop through the numbers in a range:
```` python
for <variable name> in <sequence>:
<code to run>
````
The difference here is that the variable which keeps track of our position in the loop does not increment by 1 each time the loop is run. Instead, the variable takes the value of each item in the sequence in turn:
```
list1 = ['a', 'b', 'c', 'd', 'e']
for item in list1:
print(item)
```
It's not important what we call this variable:
```
for banana in list1:
print(banana)
```
But it's probably a good idea to call the variable something meaningful:
```
data = [20, 50, 10, 67]
for d in data:
print(d)
```
## Using these loops
We can use these loops in conjunction with other concepts we have already learnt. For example, imagine that you had a list of proportions stored as decimals, but that you needed to create a new list to store them as whole numbers.
We can use <code>list.append()</code> with a for loop to create this new list. First, we have to create an empty list to which we'll append the percentages:
```
proportions = [0.3, 0.45, 0.99, 0.23, 0.46]
percentages = []
```
Next, we'll loop through each item in proportions, multiply it by 100 and append it to percentages:
```
for prop in proportions:
percentages.append(prop * 100)
print(percentages)
```
## Using for loops with dictionaries
We've seen how to loop through each item in a list. We will also make great use of the ability to loop through the keys and values in a dictionary.
If you remember from the dictionaries lessons, we can get the keys and values in a dictionary by using <code>dict.items()</code>. We can use this in conjunction with a for loop to manipulate each item in a dictionary. This is something we'll use often: we'll frequently have data for several years stored in a dictionary, and looping through its items makes the data easy to plot.
In the cell below, I've created a simple data structure which we'll access using a for loop. Imagine that this data contains sales figures for the 4 quarters in a year:
```
data = {2009 : [10,20,30,40],
2010 : [15,30,45,60],
2011 : [7,14,21,28],
2012 : [5,10,15,20]}
```
We can loop through the keys by using <code>dict.keys()</code>:
```
for k in data.keys():
print(k)
```
And we can loop through the values (which are lists):
```
for v in data.values():
print(v)
```
We can loop through them both together:
```
for k, v in data.items():
print(k, v)
```
Having the data available to compare each year is really handy, but it might also be helpful to store them as one long list so we can plot the data and see trends over time.
First, we'll make a new list to store all of the data items:
```
allYears = []
```
And then we'll loop through the dictionary and concatenate each year's data to the <code>allYears</code> list:
```
for v in data.values():
allYears = allYears + v
print(allYears)
```
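As a quick taster of how this helps with Plotly (we'll cover charting properly in later lessons), the sketch below draws one line per year by looping through the dictionary; the trace and layout options are purely illustrative:
```
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
init_notebook_mode(connected=True)
traces = []
for year, values in data.items():
    # one trace per year, with quarters 1-4 on the x-axis
    traces.append(go.Scatter(x=[1, 2, 3, 4], y=values, name=str(year)))
iplot({'data': traces, 'layout': {'title': 'Quarterly sales by year'}})
```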
### What have we learnt this lesson?
In this lesson we've seen how to access each item in a sequence. We've learnt that the variable that keeps track of our position in the loop stores each value in the sequence in turn. We've seen how to apply this knowledge to loop through a dictionary of data and concatenate data for several years into one long list.
If you have any questions, please ask in the comments section or email <a href="mailto:[email protected]">[email protected]</a>
| github_jupyter |
```
import os, time, datetime
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import random
import logging
tqdm.pandas()
import seaborn as sns
from sklearn.model_selection import train_test_split
#NN Packages
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, random_split,DataLoader, RandomSampler, SequentialSampler
logger = logging.getLogger(__name__)
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
def format_time(elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
class SigirPreprocess():
def __init__(self, text_data_path):
self.text_data_path = text_data_path
self.train = None
self.dict_code_to_id = {}
self.dict_id_to_code = {}
self.list_tags = {}
self.sentences = []
self.labels = []
self.text_col = None
self.X_test = None
def prepare_data(self ):
catalog_eng= pd.read_csv(self.text_data_path+"data/catalog_english_taxonomy.tsv",sep="\t")
X_train= pd.read_csv(self.text_data_path+"data/X_train.tsv",sep="\t")
Y_train= pd.read_csv(self.text_data_path+"data/Y_train.tsv",sep="\t")
self.list_tags = list(Y_train['Prdtypecode'].unique())
for i,tag in enumerate(self.list_tags):
self.dict_code_to_id[tag] = i
self.dict_id_to_code[i]=tag
print(self.dict_code_to_id)
Y_train['labels']=Y_train['Prdtypecode'].map(self.dict_code_to_id)
train=pd.merge(left=X_train,right=Y_train,
how='left',left_on=['Integer_id','Image_id','Product_id'],
right_on=['Integer_id','Image_id','Product_id'])
prod_map=pd.Series(catalog_eng['Top level category'].values,
index=catalog_eng['Prdtypecode']).to_dict()
train['product'] = train['Prdtypecode'].map(prod_map)
train['title_len']=train['Title'].progress_apply(lambda x : len(x.split()) if pd.notna(x) else 0)
train['desc_len']=train['Description'].progress_apply(lambda x : len(x.split()) if pd.notna(x) else 0)
train['title_desc_len']=train['title_len'] + train['desc_len']
train.loc[train['Description'].isnull(), 'Description'] = " "
train['title_desc'] = train['Title'] + " " + train['Description']
self.train = train
def get_sentences(self, text_col, remove_null_rows=False):
self.text_col = text_col
if remove_null_rows==True:
new_train = self.train[self.train[text_col].notnull()]
else:
new_train = self.train.copy()
self.sentences = new_train[text_col].values
self.labels = new_train['labels'].values
def prepare_test(self, text_col):
X_test=pd.read_csv(self.text_data_path+"data/x_test_task1_phase1.tsv",sep="\t")
X_test.loc[X_test['Description'].isnull(), 'Description'] = " "
X_test['title_desc'] = X_test['Title'] + " " + X_test['Description']
self.X_test = X_test
self.test_sentences = X_test[text_col].values
text_col = 'title_desc'
max_len = 256
val_size = 0.1
Preprocess = SigirPreprocess("/kaggle/input/textphase1/")
Preprocess.prepare_data()
Preprocess.get_sentences(text_col, True)
sentences = Preprocess.sentences
labels = Preprocess.labels
print("Total number of sentences:{}, labels:{}".format(len(sentences), len(labels)))
#function to prepare input for model training
def prep_input(sentences,labels, max_len,tokenizer):
input_ids = []
attention_masks = []
# For every sentence...
for sent in tqdm(sentences):
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = max_len, # Pad & truncate all sentences.
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
if labels is not None:
labels = torch.tensor(labels)
return input_ids,attention_masks,labels
else:
return input_ids,attention_masks
text_input='../input/multi-modal-input-text/'
tr_inputs_cam=torch.load(text_input+"tr_inputs_cam.pt")
val_inputs_cam=torch.load(text_input+"val_inputs_cam.pt")
tr_masks_cam=torch.load( text_input+"tr_masks_cam.pt")
val_masks_cam=torch.load( text_input+"val_masks_cam.pt")
tr_inputs_flau=torch.load(text_input+"tr_inputs_flau.pt")
val_inputs_flau=torch.load(text_input+"val_inputs_flau.pt")
tr_masks_flau=torch.load(text_input+"tr_masks_flau.pt")
val_masks_flau=torch.load(text_input+"val_masks_flau.pt")
!pip install pretrainedmodels
from transformers import CamembertConfig, CamembertTokenizer, CamembertModel, CamembertForSequenceClassification, AdamW
from transformers import FlaubertModel, FlaubertTokenizer,FlaubertForSequenceClassification,AdamW, FlaubertConfig
from transformers.modeling_roberta import RobertaClassificationHead
from transformers.modeling_utils import SequenceSummary
from torch.nn import functional as F
import torch.nn as nn
import pretrainedmodels
class SEResnext50_32x4d(nn.Module):
def __init__(self, pretrained='imagenet'):
super(SEResnext50_32x4d, self).__init__()
self.base_model = pretrainedmodels.__dict__["se_resnext50_32x4d"](pretrained=None)
if pretrained is not None:
self.base_model.load_state_dict(
torch.load("../input/pretrained-model-weights-pytorch/se_resnext50_32x4d-a260b3a4.pth"
)
)
self.l0 = nn.Linear(2048, 27)
def forward(self, image):
batch_size, _, _, _ = image.shape
x = self.base_model.features(image)
x = F.adaptive_avg_pool2d(x, 1).reshape(batch_size, -1)
out = self.l0(x)
return out
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class vec_output_CamembertForSequenceClassification(CamembertModel):
config_class = CamembertConfig
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = CamembertModel(config)
self.dense = nn.Linear(256*config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(0.1)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
# output_attentions=output_attentions,
# output_hidden_states=output_hidden_states,
)
sequence_output = outputs[0] #(B,256,768)
x = sequence_output.view(sequence_output.shape[0], 256*768)
# x = sequence_output[:, 0, :] # take <s> token (equiv. to [CLS])-> #(B,768) Image -> (B,2048)
x = self.dense(x) # 768 -> 768
feat= torch.tanh(x)
logits = self.out_proj(feat) # 768 -> 27
outputs = (logits,) + outputs[2:]
return outputs,feat # (loss), logits, (hidden_states), (attentions)
num_classes = 27
class vec_output_FlaubertForSequenceClassification(FlaubertModel):
config_class = FlaubertConfig
def __init__(self, config):
super().__init__(config)
self.transformer = FlaubertModel(config)
self.sequence_summary = SequenceSummary(config)
self.init_weights()
self.dropout = torch.nn.Dropout(0.1)
self.classifier = torch.nn.Linear(config.hidden_size, num_classes)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
#output = self.dropout(output)
output = transformer_outputs[0]
vec = output[:,0]
#logits
dense = self.dropout(vec)
#classifier
logits = self.classifier(dense)
outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
return outputs,dense
```
### Image data prep
```
catalog_eng= pd.read_csv("/kaggle/input/textphase1/data/catalog_english_taxonomy.tsv",sep="\t")
X_train= pd.read_csv("/kaggle/input/textphase1/data/X_train.tsv",sep="\t")
Y_train= pd.read_csv("/kaggle/input/textphase1/data/Y_train.tsv",sep="\t")
X_test=pd.read_csv("/kaggle/input/textphase1/data/x_test_task1_phase1.tsv",sep="\t")
dict_code_to_id = {}
dict_id_to_code={}
list_tags = list(Y_train['Prdtypecode'].unique())
for i,tag in enumerate(list_tags):
dict_code_to_id[tag] = i
dict_id_to_code[i]=tag
Y_train['labels']=Y_train['Prdtypecode'].map(dict_code_to_id)
train=pd.merge(left=X_train,right=Y_train,
how='left',left_on=['Integer_id','Image_id','Product_id'],
right_on=['Integer_id','Image_id','Product_id'])
prod_map=pd.Series(catalog_eng['Top level category'].values,index=catalog_eng['Prdtypecode']).to_dict()
train['product']=train['Prdtypecode'].map(prod_map)
def get_img_path(img_id,prd_id,path):
pattern = 'image'+'_'+str(img_id)+'_'+'product'+'_'+str(prd_id)+'.jpg'
return path + pattern
train_img = train[['Image_id','Product_id','labels','product']]
train_img['image_path']=train_img.progress_apply(lambda x: get_img_path(x['Image_id'],x['Product_id'],
path = '/kaggle/input/imagetrain/image_training/'),axis=1)
X_test['image_path']=X_test.progress_apply(lambda x: get_img_path(x['Image_id'],x['Product_id'],
path='/kaggle/input/imagetest/image_test/image_test_task1_phase1/'),axis=1)
train_df, val_df, _, _ = train_test_split(train_img, train_img['labels'],random_state=2020, test_size = 0.1, stratify=train_img['labels'])
input_size = 224 # for Resnt
# Applying Transforms to the Data
from torchvision import datasets, models, transforms
image_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
transforms.RandomRotation(degrees=15),
transforms.RandomHorizontalFlip(),
transforms.Resize(size=256),
transforms.CenterCrop(size=input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([
transforms.Resize(size=256),
transforms.CenterCrop(size=input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(size=256),
transforms.CenterCrop(size=input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
}
from torch.utils.data import Dataset, DataLoader, Subset
import cv2
from PIL import Image
class FusionDataset(Dataset):
def __init__(self,df,inputs_cam,masks_cam,inputs_flau,masks_flau,transform=None,mode='train'):
self.df = df
self.transform=transform
self.mode=mode
self.inputs_cam=inputs_cam
self.masks_cam=masks_cam
self.inputs_flau=inputs_flau
self.masks_flau=masks_flau
def __len__(self):
return len(self.df)
def __getitem__(self,idx):
im_path = self.df.iloc[idx]['image_path']
img = cv2.imread(im_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img=Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
img=img.cuda()
input_id_cam=self.inputs_cam[idx].cuda()
input_mask_cam=self.masks_cam[idx].cuda()
input_id_flau=self.inputs_flau[idx].cuda()
input_mask_flau=self.masks_flau[idx].cuda()
if self.mode=='test':
return img,input_id_cam,input_mask_cam,input_id_flau,input_mask_flau
else:
# labels = torch.tensor(self.df.iloc[idx]['labels'])
labels = torch.tensor(self.df.iloc[idx]['labels']).cuda()
return img,input_id_cam,input_mask_cam,input_id_flau,input_mask_flau,labels
a1 = torch.randn(3,10,10)
reduce_dim=nn.Conv1d(in_channels = 10 , out_channels = 1 , kernel_size= 1)
reduce_dim(a1).view(3,10).shape
class vector_fusion(nn.Module):
def __init__(self):
super(vector_fusion, self).__init__()
self.img_model = SEResnext50_32x4d(pretrained=None)
self.img_model.load_state_dict(torch.load('../input/seresnext2048/best_model.pt'))
self.img_model.l0=Identity()
for params in self.img_model.parameters():
params.requires_grad=False
self.cam_model= vec_output_CamembertForSequenceClassification.from_pretrained(
'camembert-base', # Use the 12-layer BERT model, with an uncased vocab.
num_labels = len(Preprocess.dict_code_to_id), # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False,) # Whether the model returns all hidden-states.
cam_model_path = '../input/camembert-vec-256m768-10ep/best_model.pt'
checkpoint = torch.load(cam_model_path)
# model = checkpoint['model']
self.cam_model.load_state_dict(checkpoint)
for param in self.cam_model.parameters():
param.requires_grad=False
self.cam_model.out_proj=Identity()
self.flau_model=vec_output_FlaubertForSequenceClassification.from_pretrained(
'flaubert/flaubert_base_cased',
num_labels = len(Preprocess.dict_code_to_id),
output_attentions = False,
output_hidden_states = False,)
flau_model_path='../input/flaubert-8933/best_model.pt'
checkpoint = torch.load(flau_model_path)
self.flau_model.load_state_dict(checkpoint)
for param in self.flau_model.parameters():
param.requires_grad=False
self.flau_model.classifier=Identity()
self.reduce_dim=nn.Conv1d(in_channels = 2048 , out_channels = 768 , kernel_size= 1)
self.reduce_dim2=nn.Conv1d(in_channels = 768 , out_channels = 1 , kernel_size= 1)
self.out=nn.Linear(768*3, 27)
#gamma
# self.w1 = nn.Parameter(torch.zeros(1))
# self.w2 = nn.Parameter(torch.zeros(1))
# self.w3 = nn.Parameter(torch.zeros(1))
def forward(self,img,input_id_cam,input_mask_cam,input_id_flau,input_mask_flau):
cam_emb,vec1 =self.cam_model(input_id_cam,
token_type_ids=None,
attention_mask=input_mask_cam)
flau_emb,vec2 =self.flau_model(input_id_flau,
token_type_ids=None,
attention_mask=input_mask_flau)
#Projecting the image embedding to lower dimension
img_emb=self.img_model(img)
img_emb=img_emb.view(img_emb.shape[0],img_emb.shape[1],1)
img_emb=self.reduce_dim(img_emb)
img_emb=img_emb.view(img_emb.shape[0],img_emb.shape[1]) ###### bs * 768
#summing up the vectors
#text_emb = cam_emb[0] + flau_emb[0]
#Bilinear
#text_emb = text_emb.view(text_emb.shape[0],1,text_emb.shape[1]) ##### bs * 1 * 768
#Bilinear Pooling
#pool_emb = torch.bmm(img_emb,text_emb) ### bs * 768 * 768
#pool_emb = self.reduce_dim2(pool_emb).view(text_emb.shape[0],768) #### bs * 1 * 768
fuse= torch.cat([img_emb,cam_emb[0],flau_emb[0]],axis=1)
logits=self.out(fuse)
return logits
model=vector_fusion()
model.cuda()
train_dataset=FusionDataset(train_df,tr_inputs_cam,tr_masks_cam,tr_inputs_flau,tr_masks_flau,transform=image_transforms['test'])
val_dataset=FusionDataset(val_df,val_inputs_cam,val_masks_cam,val_inputs_flau,val_masks_flau,transform=image_transforms['test'])
# test_dataset=FusionDataset(X_test,test_inputs,test_makss,transform=image_transforms['test'],mode='test')
batch_size=64
train_dataloader=DataLoader(train_dataset,batch_size=batch_size,shuffle=True)
validation_dataloader=DataLoader(val_dataset,batch_size=batch_size,shuffle=False)
# test_data=DataLoader(test_dataset,batch_size=batch_size,shuffle=False)
optimizer = AdamW(model.parameters(),
lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps = 1e-8 # args.adam_epsilon - default is 1e-8.
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
count_parameters(model)
from transformers import get_linear_schedule_with_warmup
# Number of training epochs. The BERT authors recommend between 2 and 4.
# Here we train for 3 epochs; training for longer risks over-fitting the training data.
epochs = 3
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0, # Default value in run_glue.py
num_training_steps = total_steps)
import torch.nn as nn
loss_criterion = nn.CrossEntropyLoss()
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
from sklearn.metrics import f1_score
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# Track the best validation F1 across all epochs; it must be initialised outside the
# epoch loop so that 'best_model.pt' really keeps the best checkpoint.
best_f1 = 0
# For each epoch...
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
#tr and val
# vec_output_tr = []
# vec_output_val =[]
# Measure how long the training epoch takes.
t0 = time.time()
# Reset the total loss for this epoch.
total_train_loss = 0
# Put the model into training mode. Don't be mislead--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
# vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
model.train()
# For each batch of training data...
for step, batch in tqdm(enumerate(train_dataloader)):
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using the
# `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
# return img,input_id_cam,input_mask_cam,input_id_flau,input_mask_flau
b_img=batch[0].to(device)
b_input_id_cam = batch[1].to(device)
b_input_mask_cam = batch[2].to(device)
b_input_id_flau = batch[3].to(device)
b_input_mask_flau = batch[4].to(device)
b_labels = batch[5].to(device)
model.zero_grad()
logits = model(b_img,b_input_id_cam ,b_input_mask_cam,b_input_id_flau,b_input_mask_flau)
#Defining the loss
loss = loss_criterion(logits, b_labels)
#saving the features_tr
# vec = vec.detach().cpu().numpy()
# vec_output_tr.extend(vec)
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_train_loss += loss.item()
# Perform a backward pass to calculate the gradients.
loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
# Measure how long this epoch took.
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f} ".format(avg_train_loss))
print(" Training epcoh took: {:} ".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
predictions=[]
true_labels=[]
# Evaluate data for one epoch
for batch in tqdm(validation_dataloader):
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using
# the `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_img=batch[0].to(device)
b_input_id_cam = batch[1].to(device)
b_input_mask_cam = batch[2].to(device)
b_input_id_flau = batch[3].to(device)
b_input_mask_flau = batch[4].to(device)
b_labels = batch[5].to(device)
# Tell pytorch not to bother with constructing the compute graph during
# the forward pass, since this is only needed for backprop (training).
with torch.no_grad():
# Forward pass, calculate logit predictions.
# token_type_ids is the same as the "segment ids", which
# differentiates sentence 1 and 2 in 2-sentence tasks.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# Get the "logits" output by the model. The "logits" are the output
# values prior to applying an activation function like the softmax.
logits = model(b_img,b_input_id_cam ,b_input_mask_cam,b_input_id_flau,b_input_mask_flau)
#new
#defining the val loss
loss = loss_criterion(logits, b_labels)
# Accumulate the validation loss.
total_eval_loss += loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
# Move logits and labels to CPU
predicted_labels=np.argmax(logits,axis=1)
predictions.extend(predicted_labels)
label_ids = b_labels.to('cpu').numpy()
true_labels.extend(label_ids)
#saving the features_tr
# vec = vec.detach().cpu().numpy()
# vec_output_val.extend(vec)
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
total_eval_accuracy += flat_accuracy(logits, label_ids)
# Report the final accuracy for this validation run.
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# Calculate the average loss over all of the batches.
avg_val_loss = total_eval_loss / len(validation_dataloader)
# Measure how long the validation run took.
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
print("Validation F1-Score: {}".format(f1_score(true_labels,predictions,average='macro')))
curr_f1=f1_score(true_labels,predictions,average='macro')
if curr_f1 > best_f1:
best_f1=curr_f1
torch.save(model.state_dict(), 'best_model.pt')
# np.save('best_vec_train_model_train.npy',vec_output_tr)
# np.save('best_vec_val.npy',vec_output_val)
# Record all statistics from this epoch.
# training_stats.append(
# {
# 'epoch': epoch_i + 1,
# 'Training Loss': avg_train_loss,
# 'Valid. Loss': avg_val_loss,
# 'Valid. Accur.': avg_val_accuracy,
# 'Training Time': training_time,
# 'Validation Time': validation_time
# }
# )
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
from sklearn.metrics import f1_score
print("Validation F1-Score: {}".format(f1_score(true_labels,predictions,average='macro')))
```
| github_jupyter |
# Optimization
Things to try:
- change the number of samples
- with and without bias
- with and without regularization
- changing the number of layers
- changing the amount of noise
- change number of degrees
- look at parameter values (high) in OLS
- train the network for many epochs
```
from fastprogress.fastprogress import progress_bar
import torch
import matplotlib.pyplot as plt
from jupyterthemes import jtplot
jtplot.style(context="talk")
def plot_regression_data(model=None, MSE=None, poly_deg=0):
# Plot the noisy scatter points and the "true" function
plt.scatter(x_train, y_train, label="Noisy Samples")
plt.plot(x_true, y_true, "--", label="True Function")
# Plot the model's learned regression function
if model:
x = x_true.unsqueeze(-1)
x = x.pow(torch.arange(poly_deg + 1)) if poly_deg else x
with torch.no_grad():
yhat = model(x)
plt.plot(x_true, yhat, label="Learned Function")
plt.xlim([min_x, max_x])
plt.ylim([-5, 5])
plt.legend()
if MSE:
plt.title(f"MSE = ${MSE}$")
```
# Create Fake Training Data
```
def fake_y(x, add_noise=False):
y = 10 * x ** 3 - 5 * x
return y + torch.randn_like(y) * 0.5 if add_noise else y
N = 20
min_x, max_x = -1, 1
x_true = torch.linspace(min_x, max_x, 100)
y_true = fake_y(x_true)
x_train = torch.rand(N) * (max_x - min_x) + min_x
y_train = fake_y(x_train, add_noise=True)
plot_regression_data()
```
# Train A Simple Linear Model Using Batch GD
```
# Hyperparameters
learning_rate = 0.1
num_epochs = 100
# Model parameters
m = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
params = (b, m)
# Torch utils
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(params, lr=learning_rate)
# Regression
for epoch in range(num_epochs):
# Model
yhat = m * x_train + b
# Update parameters
optimizer.zero_grad()
loss = criterion(yhat, y_train)
loss.backward()
optimizer.step()
plot_regression_data(lambda x: m * x + b, MSE=loss.item())
```
# Train Linear Regression Model Using Batch GD
```
# Hyperparameters
learning_rate = 0.1
num_epochs = 1000
# Model parameters
w2 = torch.randn(1, requires_grad=True)
w1 = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
params = (b, w1, w2)
# Torch utils
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(params, lr=learning_rate)
# Regression
for epoch in range(num_epochs):
# Model
yhat = b + w1 * x_train + w2 * x_train ** 2
# Update parameters
optimizer.zero_grad()
loss = criterion(yhat, y_train)
loss.backward()
optimizer.step()
plot_regression_data(lambda x: b + w1 * x + w2 * x ** 2, MSE=loss.item())
```
# Train Complex Linear Regression Model Using Batch GD
```
# Hyperparameters
learning_rate = 0.1
num_epochs = 1000
# Model parameters
degrees = 50 # 3, 4, 16, 32, 64, 128
powers = torch.arange(degrees + 1)
x_poly = x_train.unsqueeze(-1).pow(powers)
params = torch.randn(degrees + 1, requires_grad=True)
# Torch utils
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD([params], lr=learning_rate)
# Regression
for epoch in range(num_epochs):
# Model
yhat = x_poly @ params
# Update parameters
optimizer.zero_grad()
loss = criterion(yhat, y_train)
loss.backward()
optimizer.step()
plot_regression_data(lambda x: x @ params, poly_deg=degrees, MSE=loss.item())
params
```
# Compute Linear Regression Model Using Ordinary Least Squares
```
params = ((x_poly.T @ x_poly).inverse() @ x_poly.T) @ y_train
mse = torch.nn.functional.mse_loss(x_poly @ params, y_train)
plot_regression_data(lambda x: x @ params, poly_deg=degrees, MSE=mse)
# params
params
```
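The experiment list above also mentions regularization and the large OLS parameter values. As a small aside (not in the original notebook), the ridge-regularized closed form shrinks those parameters; the sketch below reuses `x_poly`, `y_train` and `degrees` from the cells above, and the penalty strength `lam` is an arbitrary choice.
```
# Sketch: ridge-regularized least squares on the same design matrix.
# lam is an arbitrary regularization strength, not a value from the original notebook.
lam = 1e-2
eye = torch.eye(x_poly.shape[1])
ridge_params = (x_poly.T @ x_poly + lam * eye).inverse() @ x_poly.T @ y_train
ridge_mse = torch.nn.functional.mse_loss(x_poly @ ridge_params, y_train)
plot_regression_data(lambda x: x @ ridge_params, poly_deg=degrees, MSE=ridge_mse)
```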
# Train Neural Network Model Using Batch GD
```
# Hyperparameters
learning_rate = 0.01
num_epochs = 100000
regularization = 1e-2
# Model parameters
model = torch.nn.Sequential(
torch.nn.Linear(1, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 1),
)
# Torch utils
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(
model.parameters(), lr=learning_rate, weight_decay=regularization
)
# Training
for epoch in progress_bar(range(num_epochs)):
# Model
yhat = model(x_train.unsqueeze(-1))
# Update parameters
optimizer.zero_grad()
loss = criterion(yhat.squeeze(), y_train)
loss.backward()
optimizer.step()
plot_regression_data(model, loss.item())
for param in model.parameters():
print(param.mean())
```
| github_jupyter |
## Hybrid Neural Net to solve Regression Problem
We use a neural net with a quantum layer to predict the second-half betting lines given the result of the first half and the opening line. The quantum layer uses 8 qubits, and the model is built with Keras.
```
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import pennylane as qml
import warnings
warnings.filterwarnings('ignore')
tf.keras.backend.set_floatx('float64')
import warnings
warnings.filterwarnings('ignore')
###predict 2nd half line using 1st half total and open ##
df1 = pd.read_csv("nfl_odds.csv")
df1['1H'] = df1['1st'] + df1['2nd']
df2 = pd.read_csv('bet.csv')
df = df1.merge(df2, left_on = 'Team', right_on = 'Tm')
df = df[['1H','Open', 'TO%','PF','Yds','ML', '2H']]
df.head()
n_qubits = 8
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def qnode(inputs, weights):
qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]
n_layers = 4
weight_shapes = {"weights": (n_layers, n_qubits)}
qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)
clayer_1 = tf.keras.layers.Dense(8, activation="relu")
clayer_2 = tf.keras.layers.Dense(2, activation="relu")
model = tf.keras.models.Sequential([clayer_1, qlayer, clayer_2])
opt = tf.keras.optimizers.SGD(learning_rate=0.2)
model.compile(opt, loss="mae", metrics=["mean_absolute_error"])
df = df[df.Open != 'pk']
df = df[df['2H'] != 'pk']
df['Open'] = df['Open'].astype(float)
df['2H'] = df['2H'].astype(float)
X = df[['1H','Open','TO%','PF','Yds','ML']]
y = df['2H']
X = np.asarray(X).astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=0)
scaler = MinMaxScaler(feature_range = (0,1))
scaler.fit(X_train)
X_train = scaler.transform(X_train)
fitting = model.fit(X_train, y_train, epochs=10, batch_size=5, validation_split=0.15, verbose=2)
X_test = scaler.transform(X_test)
preds = model.predict(X_test)
pred = pd.DataFrame(preds, columns =[ 'prediction1', 'prediction2'])
pred = pred[(pred.prediction1 > 0) & (pred.prediction1 < 30)]
y_test = y_test.reset_index()
y_test = y_test[y_test['2H'] > 6]
compare = pd.concat([pred, y_test], axis=1)
compare = compare.drop('index', axis=1)
compare.dropna()
```
## Classical NN (Benchmarking)
The MAE is twice as large for the purely classical NN. The quantum layer is helping the solution converge more quickly! (As an aside, the quantum NN takes a lot longer to run.)
```
clayer_1 = tf.keras.layers.Dense(8, activation="relu")
clayer_2 = tf.keras.layers.Dense(2, activation="relu")
model = tf.keras.models.Sequential([clayer_1, clayer_2])
opt = tf.keras.optimizers.SGD(learning_rate=0.2)
model.compile(opt, loss="mae", metrics=["mean_absolute_error"])
df = df[df.Open != 'pk']
df = df[df['2H'] != 'pk']
df['Open'] = df['Open'].astype(float)
df['2H'] = df['2H'].astype(float)
X = df[['1H','Open','TO%','PF','Yds','ML']]
y = df['2H']
X = np.asarray(X).astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=0)
scaler = MinMaxScaler(feature_range = (0,1))
scaler.fit(X_train)
X_train = scaler.transform(X_train)
fitting = model.fit(X_train, y_train, epochs=15, batch_size=10, validation_split=0.15, verbose=2)
```
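To make the comparison above concrete, here is a short evaluation sketch (not part of the original notebook). It scores whichever model was fitted last on the held-out split; note that `X_test` from the cell above has not been scaled yet, and that the network has two output units, so we compare its first output column against `y_test`, mirroring the `prediction1` column used in the quantum section.
```
# Sketch: evaluate the most recently fitted model on the held-out test split.
# X_test from the cell above is unscaled, so transform it first.
from sklearn.metrics import mean_absolute_error

test_preds = model.predict(scaler.transform(X_test))
print("Test MAE: {:.3f}".format(mean_absolute_error(y_test, test_preds[:, 0])))
```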
| github_jupyter |
# 1-5.2 Python Intro
## conditionals, type, and mathematics extended
- conditionals: `elif`
- casting
- **basic math operators**
-----
><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
- code more than two choices using `elif`
- gather numeric input using type casting
- **perform subtraction, multiplication and division operations in code**
#
<font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font>
## Math basic operators
### `+` addition
### `-` subtraction
### `*` multiplication
### `/` division
#
<font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>
```
# [ ] review and run example
print("3 + 5 =",3 + 5)
print("3 + 5 - 9 =", 3 + 5 - 9)
print("48/9 =", 48/9)
print("5*5 =", 5*5)
print("(14 - 8)*(19/4) =", (14 - 8)*(19/4))
# [ ] review and run example - 'million_maker'
def million_maker():
make_big = input("enter a non-decimal number you wish were bigger: ")
return int(make_big)*1000000
print("Now you have", million_maker())
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 1</B></font>
## use math operators to solve the set of tasks below
```
# [ ] print the result of subtracting 15 from 43
print (43 - 15)
# [ ] print the result of multiplying 15 and 43
print (15*43)
# [ ] print the result of dividing 156 by 12
print (156/12)
# [ ] print the result of dividing 21 by 0.5
print (21/0.5)
# [ ] print the result of adding 111 plus 84 and then subtracting 45
print (111+84-45)
# [ ] print the result of adding 21 and 4 and then multiplying that sum by 4
print ((21+4)*4)
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 2</B></font>
## Program: Multiplying Calculator Function
- define function **`multiply()`**, and within the function:
- gets user input() of 2 *strings* made of whole numbers
- cast the input to **`int()`**
- multiply the integers and **return** the equation with result as a **`str()`**
- **return** example
```python
9 * 13 = 117
```
```
# [ ] create and test multiply() function
def multiply():
num_1 = input ("Enter a whole number:")
num_2 = input ("Enter a second whole number:")
return (str(int(num_1)*int(num_2)))
print(multiply() + " is a string")
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 3</B></font>
## Project: Improved Multiplying Calculator Function
### putting together conditionals, input casting and math
- #### update the multiply() function to multiply or divide
- single parameter is **`operator`** with arguments of **`*`** or **`/`** operator
- default operator is "*" (multiply)
- **return** the result of multiplication or division
- if operator other than **`"*"`** or **`"/"`** then **` return "Invalid Operator"`**
```
# [ ] create improved multiply() function and test with /, no argument, and an invalid operator ($)
def multiply(operator = "*"):
num_1 = input ("Enter a whole number:")
num_2 = input ("Enter a second whole number:")
if operator == "*":
return (str(int(num_1)*int(num_2)))
elif operator =="/":
return (str(int(num_1)/int(num_2)))
else:
print ("Corruption occurred")
ops = input("Would you like to multiply (m) or divide (d)?" )
if ops == "m":
print (multiply ("*"))
elif ops == "d":
print (multiply ("/"))
else:
print ("Invalid operator")
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 4</B></font>
## Fix the Errors
```
# Review, run, fix
student_name = input("enter name: ").capitalize()
if student_name.startswith("F"):
print(student_name,"Congratulations, names starting with 'F' get to go first today!")
elif student_name.startswith("G"):
print(student_name,"Congratulations, names starting with 'G' get to go second today!")
else:
print(student_name, "please wait for students with names staring with 'F' and 'G' to go first today.")
```
[Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) © 2017 Microsoft
| github_jupyter |
<a id='1'></a>
# 1. Import packages
```
from keras.models import Sequential, Model
from keras.layers import *
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.initializers import RandomNormal
from keras.applications import *
import keras.backend as K
from tensorflow.contrib.distributions import Beta
import tensorflow as tf
from keras.optimizers import Adam
from image_augmentation import random_transform
from image_augmentation import random_warp
from utils import get_image_paths, load_images, stack_images
from pixel_shuffler import PixelShuffler
import time
import numpy as np
from PIL import Image
import cv2
import glob
from random import randint, shuffle
from IPython.display import clear_output
from IPython.display import display
import matplotlib.pyplot as plt
%matplotlib inline
```
<a id='4'></a>
# 4. Config
mixup paper: https://arxiv.org/abs/1710.09412
Default training data directories: `./faceA/` and `./faceB/`
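The config below turns on mixup. As a reminder of what that option refers to, here is a minimal sketch of the mixup operation from the linked paper (a generic sketch of the technique, not this repo's exact usage):
```
# Generic mixup sketch (Zhang et al., 2017): blend pairs of samples and their targets
# with a Beta(alpha, alpha)-distributed coefficient. Not this repo's exact implementation.
import numpy as np

def mixup(x1, y1, x2, y2, alpha=0.2):
    lam = np.random.beta(alpha, alpha)
    return lam * x1 + (1 - lam) * x2, lam * y1 + (1 - lam) * y2
```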
```
K.set_learning_phase(1)
channel_axis=-1
channel_first = False
IMAGE_SHAPE = (64, 64, 3)
nc_in = 3 # number of input channels of generators
nc_D_inp = 6 # number of input channels of discriminators
use_perceptual_loss = False
use_lsgan = True
use_instancenorm = False
use_mixup = True
mixup_alpha = 0.2 # 0.2
batchSize = 32
lrD = 1e-4 # Discriminator learning rate
lrG = 1e-4 # Generator learning rate
# Path of training images
img_dirA = './faceA/*.*'
img_dirB = './faceB/*.*'
```
<a id='5'></a>
# 5. Define models
```
from model_GAN_v2 import *
encoder = Encoder()
decoder_A = Decoder_ps()
decoder_B = Decoder_ps()
x = Input(shape=IMAGE_SHAPE)
netGA = Model(x, decoder_A(encoder(x)))
netGB = Model(x, decoder_B(encoder(x)))
netDA = Discriminator(nc_D_inp)
netDB = Discriminator(nc_D_inp)
```
<a id='6'></a>
# 6. Load Models
```
try:
encoder.load_weights("models/encoder.h5")
decoder_A.load_weights("models/decoder_A.h5")
decoder_B.load_weights("models/decoder_B.h5")
#netDA.load_weights("models/netDA.h5")
#netDB.load_weights("models/netDB.h5")
print ("model loaded.")
except:
print ("Weights file not found.")
pass
```
<a id='7'></a>
# 7. Define Inputs/Outputs Variables
* `distorted_A`: a (batch_size, 64, 64, 3) tensor, input of generator_A (netGA).
* `distorted_B`: a (batch_size, 64, 64, 3) tensor, input of generator_B (netGB).
* `fake_A`: a (batch_size, 64, 64, 3) tensor, output of generator_A (netGA).
* `fake_B`: a (batch_size, 64, 64, 3) tensor, output of generator_B (netGB).
* `mask_A`: a (batch_size, 64, 64, 1) tensor, mask output of generator_A (netGA).
* `mask_B`: a (batch_size, 64, 64, 1) tensor, mask output of generator_B (netGB).
* `path_A`: a function that takes distorted_A as input and outputs fake_A.
* `path_B`: a function that takes distorted_B as input and outputs fake_B.
* `path_mask_A`: a function that takes distorted_A as input and outputs mask_A.
* `path_mask_B`: a function that takes distorted_B as input and outputs mask_B.
* `path_abgr_A`: a function that takes distorted_A as input and outputs concat([mask_A, fake_A]).
* `path_abgr_B`: a function that takes distorted_B as input and outputs concat([mask_B, fake_B]).
* `real_A`: a (batch_size, 64, 64, 3) tensor, target images for generator_A given input distorted_A.
* `real_B`: a (batch_size, 64, 64, 3) tensor, target images for generator_B given input distorted_B.
```
def cycle_variables(netG):
distorted_input = netG.inputs[0]
fake_output = netG.outputs[0]
alpha = Lambda(lambda x: x[:,:,:, :1])(fake_output)
rgb = Lambda(lambda x: x[:,:,:, 1:])(fake_output)
masked_fake_output = alpha * rgb + (1-alpha) * distorted_input
fn_generate = K.function([distorted_input], [masked_fake_output])
fn_mask = K.function([distorted_input], [concatenate([alpha, alpha, alpha])])
fn_abgr = K.function([distorted_input], [concatenate([alpha, rgb])])
return distorted_input, fake_output, alpha, fn_generate, fn_mask, fn_abgr
distorted_A, fake_A, mask_A, path_A, path_mask_A, path_abgr_A = cycle_variables(netGA)
distorted_B, fake_B, mask_B, path_B, path_mask_B, path_abgr_B = cycle_variables(netGB)
real_A = Input(shape=IMAGE_SHAPE)
real_B = Input(shape=IMAGE_SHAPE)
```
<a id='11'></a>
# 11. Helper Function: face_swap()
This function is provided for those who don't have enough VRAM to run dlib's CNN and the GAN model at the same time.
Inputs:
* `img`: an RGB face image of any size.
* `path_func`: a function that is either path_abgr_A or path_abgr_B.
Outputs:
* `result_img`: an RGB swapped face image after masking.
* `result_mask`: a single-channel uint8 mask image.
```
def swap_face(img, path_func):
input_size = img.shape
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # generator expects BGR input
ae_input = cv2.resize(img, (64,64))/255. * 2 - 1
result = np.squeeze(np.array([path_func([[ae_input]])]))
result_a = result[:,:,0] * 255
result_bgr = np.clip( (result[:,:,1:] + 1) * 255 / 2, 0, 255 )
result_a = np.expand_dims(result_a, axis=2)
result = (result_a/255 * result_bgr + (1 - result_a/255) * ((ae_input + 1) * 255 / 2)).astype('uint8')
#result = np.clip( (result + 1) * 255 / 2, 0, 255 ).astype('uint8')
result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
result = cv2.resize(result, (input_size[1],input_size[0]))
result_a = np.expand_dims(cv2.resize(result_a, (input_size[1],input_size[0])), axis=2)
return result, result_a
whom2whom = "BtoA" # default trainsforming faceB to faceA
if whom2whom is "AtoB":
path_func = path_abgr_B
elif whom2whom is "BtoA":
path_func = path_abgr_A
else:
print ("whom2whom should be either AtoB or BtoA")
input_img = plt.imread("./IMAGE_FILENAME.jpg")
plt.imshow(input_img)
result_img, result_mask = swap_face(input_img, path_func)
plt.imshow(result_img)
plt.imshow(result_mask[:, :, 0]) # cmap='gray'
```
| github_jupyter |
```
%load_ext autoreload
%autoreload
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
from mirisim.config_parser import SimulatorConfig
from mirisim import MiriSimulation
import tso_img_datalabs_sim
from tso_img_datalabs_sim import wasp103_scene, wasp103_sim_config
from importlib import reload
```
In this notebook I'm going to generate simulated MIRI time series imaging data, to provide as a test set for ESA Datalabs. To install MIRISim, see [the public release webpage](http://miri.ster.kuleuven.be/bin/view/Public/MIRISim_Public). The target for the mock observations is WASP-103, an exoplanet host star with the following properties from [the exoplanet encyclopaedia](http://exoplanet.eu/catalog/wasp-103_b/):
* spectral type F8V
* T_bb = 6110 K
* V = 12.0, K = 10.7
K magnitude of 10.7 corresponds to a flux of 32.5 mJy or 32.5e3 microJy.
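As a quick sanity check on that number (not in the original notebook), the conversion is $F_\nu = F_0 \, 10^{-m/2.5}$; the K-band zero point $F_0$ in the sketch below is an assumption (a value of ~620 Jy reproduces the quoted 32.5 mJy, while the 2MASS Ks zero point of ~666.7 Jy would give ~35 mJy).
```
# Sanity-check sketch for the magnitude-to-flux conversion quoted above.
# The K-band zero point F0 is an assumption, not a value from this notebook.
F0_Jy = 620.0
K_mag = 10.7
flux_mJy = F0_Jy * 10 ** (-K_mag / 2.5) * 1e3
print('K = {0} -> {1:.1f} mJy'.format(K_mag, flux_mJy))
```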
Using the ETC, I calculated the following number of groups for a high-SNR but unsaturated image:
* FULL array: NGROUPS = 5
* SUB64 subarray: NGROUPS = 60
We want to simulate a medium-length exposure in both the FULL and SUB64 subarrays. In total that's 2 simulations.
| Sim no | Array | NGroups | NInt | NExp | Exp time |
| -------|---------| ---------|--------|--------|----------|
|1 |FULL | 5 | 200 | 1 | 0.77 hr |
|2 |SUB64 | 60 | 600 | 1 | 0.85 hr |
### Steps in setting up the simulation
This notebook will go through the following steps:
* Create the scene
* Set up the simulation
* Run the simulation
Each step has its own function. Steps 1 and 2 will each write out a .ini file, which will be used as input for the final step.
```
arr = ['FULL', 'SUB64']
ngrp = [5, 60]
#nints = [200, 600]
nints = [1, 1]
```
## Step 1: Creating the input scene (WASP-103)
Here we'll create the input scene for the simulations using the function wasp103_scene(). Arguments:
* scene_file: the filename for the .ini file
* write_cube: write the scene image out to a FITS file (optional; default=False)
The function returns a mirisim.skysim.scenes.CompositeSkyScene object.
```
scene_ini = wasp103_scene(scene_file='wasp103_scene.ini', write_cube=False)
print(scene_ini)
```
## Step 2: Configuring the simulation
Now I'll set up the simulations and prepare to run them. I'll set it up to loop through the 2 simulations. For this I wrote the function wasp103_sim_config. Check the docstring for descriptions and default values of the arguments.
The function will write out another .ini file containing the simulation configuration, and it returns the output filename for further use.
```
#reload(tso_img_sims_setup)
#from tso_img_sims_setup import wasp103_sim_config
for (a, g, i) in zip(arr, ngrp, nints):
sim_ini = wasp103_sim_config(mode='imaging', arr=a, ngrp=g, nint=i, nexp=1, filt='F770W',
scene_file=scene_ini, out=True)
print(sim_ini)
```
### Step 3: Run the simulation
In the following step we'll run the simulations for the 2 different cases. For each run, we need 3 input files: the scene, the simulation configuration, and the simulator setup file. The first and last of these remain the same for each run, and we loop through the list of simulation config files.
After each simulation has run, the code renames the output directory so that the simulation settings are included in the directory name.
```
cfg_files = glob.glob('*_simconfig.ini')
print(cfg_files)
# configure the simulator engine - this requires no editing from the default
simulator_config = SimulatorConfig.from_default()
for f in cfg_files[:1]:
tmp = f.split('.')
fcomps = tmp[0].split('_')
sim = MiriSimulation.from_configfiles(f)
sim.run()
outdir = sorted(glob.glob('*_*_mirisim'), key=os.path.getmtime )[-1]
new_outdir = 'wasp103_imtso_{0}_{1}_{2}'.format(fcomps[1], fcomps[2], outdir)
os.rename(outdir, new_outdir)
print(outdir, new_outdir)
```
### Step 4: Minor housekeeping to make the sim pipeline-ready
To make the MIRISim data ready for the TSO-specific pipeline, we have to make a couple of small changes to the data:
* add the TSOVISIT = TRUE to the primary header
* make sure the
| github_jupyter |
<a href="https://colab.research.google.com/github/HartmutD/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Cluster_Feature_Importance.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Clustered Feature Importance
The goal of this notebook is to demonstrate Clustered Feature Importance (CFI), a feature importance method suggested by **Dr. Marcos Lopez de Prado** in the [paper](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3517595) and the book Machine Learning for Asset Managers. The aim of CFI is to cluster similar features and apply the feature importance analysis at the cluster level. This way the clusters are mutually dissimilar, so the method tends to tame the substitution effect, and by using information-theoretic metrics we can also reduce the multicollinearity of the dataset.
```
# General Imports
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, log_loss
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection._split import KFold
# Import MlFinLab tools
import mlfinlab as ml
from mlfinlab.util.generate_dataset import get_classification_data
from mlfinlab.clustering.feature_clusters import get_feature_clusters
from mlfinlab.cross_validation import ml_cross_val_score
from mlfinlab.feature_importance import (mean_decrease_impurity, mean_decrease_accuracy,
plot_feature_importance)
from mlfinlab.clustering.onc import get_onc_clusters
```
**The Clustered Feature Importance (CFI) algorithm can be implemented as a two-step process, as described in the book.**
## Step 1: Feature Clustering
As a first step we need to generate the clusters, i.e. the subsets of features we want to analyse with feature importance methods. This can be done using the feature clusters module of mlfinlab, which exposes the same parameters for generating feature clusters as described in the book.
* The algorithm projects the observed features into a metric space by applying a dependence metric, either correlation-based or information-theory-based. Information-theoretic metrics have the advantage of recognizing redundant features that are the result of nonlinear combinations of informative features (i.e. multicollinearity).
* Next, we need to determine the optimal number of clusters. The user can either specify the number of clusters to use, in which case hierarchical clustering is applied to the distance matrix derived from the dependence matrix with a given linkage method, or use the ONC algorithm, which relies on K-Means clustering to automate the task of finding either the optimal number of clusters alone or both the optimal number of clusters and the cluster compositions.
But the *caveat* of this process is that some silhouette scores may be low due to one feature being a combination of multiple features across clusters. This is a problem, because ONC cannot assign one feature to multiple clusters. Hence, the following transformation may help reduce the multicollinearity of the system:
```
# Generating a synthetic dataset for testing
# We generate 40 features: 5 informative ('I_'), 30 redundant ('R_') and the remaining 5 noisy ('N_') features,
# with 10000 rows of samples
# Redundant features are those which share a large amount of information among each other and also with the informative features
# That is, the redundant features are those with a substitution effect
X, y = get_classification_data(n_features=40, n_informative=5, n_redundant=30, n_samples=10000, sigma=0.1)
X.head(3)
# Now we get the feature clusters
dep_matrix = 'linear' # Linear correlation-based dependence metric
# The n_cluster is set to None for getting the Optimal Number of Clusters using ONC Algorithm
clusters = get_feature_clusters(X, dependence_metric=dep_matrix, distance_metric=None, linkage_method=None, n_clusters=None)
clusters
```
As we can see, the algorithm has not detected any features with a low silhouette score, so there is no need to replace the features with their residuals*. Now that we have identified the number of clusters (six in this case) and the composition of features within each cluster, we can move to the next step.
<br> (*This will be discussed in a later part of this notebook.)
## Step 2: Clustered Importance
Clustered Feature Importance can be implemented by simply passing the feature clusters obtained in Step 1 to the **clustered_subsets** argument of the MDI or MDA feature importance algorithm. We apply MDI and MDA to groups of similar features, rather than to individual features, and obtain the importance of each cluster as a whole instead of per feature. This way we can analyse how mutually dissimilar clusters interact with the model and possibly isolate the noisy/non-informative clusters.
```
# Setup for feature importance algorithm
# We define a classifier
clf_base = DecisionTreeClassifier(criterion='entropy', max_features=1, class_weight='balanced', min_weight_fraction_leaf=0)
clf = BaggingClassifier(base_estimator=clf_base, n_estimators=1000, max_features=1., max_samples=1.,
oob_score=True, n_jobs=-1)
# Fit the classifier
fit = clf.fit(X,y)
# Setting up cross-validation generator
# Use Purged K-Fold generator while using it on real financial dataset to avoid leakage
cvGen = KFold(n_splits=10)
oos_score = ml_cross_val_score(clf, X, y, cv_gen=cvGen, sample_weight_train=None, scoring=log_loss).mean()
```
### Clustered MDI
We compute the clustered MDI as the sum of the MDI values of the features that constitute that cluster. If there is one feature per cluster, then MDI and clustered MDI are the same.
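The aggregation itself is straightforward; purely for intuition, here is a minimal sketch of the idea (the mlfinlab call below handles this internally, together with the standard-deviation estimates). It assumes the per-feature MDI values are available as a pandas Series and that the clusters behave like a list of feature-name lists; the `'mean'` column name in the commented usage is an assumption about mlfinlab's output format.
```
# Sketch of the aggregation described above: sum the per-feature MDI values within each cluster.
# feature_imp: pandas Series of per-feature MDI; feature_clusters: list of feature-name lists.
import pandas as pd

def cluster_importance(feature_imp, feature_clusters):
    return pd.Series({f"C_{i}": feature_imp[list(members)].sum()
                      for i, members in enumerate(feature_clusters)})

# Usage sketch (per-feature MDI first, then aggregate):
# per_feature_mdi = mean_decrease_impurity(clf, X.columns)['mean']
# cluster_importance(per_feature_mdi, clusters)
```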
```
clustered_mdi_imp = mean_decrease_impurity(clf,X.columns,clustered_subsets=clusters)
plot_feature_importance(clustered_mdi_imp,oob_score=clf.oob_score_, oos_score=oos_score,
save_fig=True, output_path='images/clustered_mdi.png')
```
As expected, the clusters of non-informative features are given the least importance, and the clusters with redundant and informative features are placed above the noise cluster. This is very useful for detecting features that are non-informative on their own, without the presence of the other features within the same cluster.
### Clustered MDA
Clustered MDA is an extension of normal MDA that tackles multicollinearity and the (linear or non-linear) substitution effect. Its implementation was also discussed by Dr. Marcos Lopez de Prado in the Clustered Feature Importance [presentation slides](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3517595).
```
clustered_mda_imp = mean_decrease_accuracy(clf, X, y, cv_gen=cvGen, clustered_subsets=clusters,
scoring=log_loss)
plot_feature_importance(clustered_mda_imp,oob_score=clf.oob_score_, oos_score=oos_score,
save_fig=True, output_path='images/clustered_mda.png')
```
The clustered MDA has also correctly identified the noisy cluster and placed it below.
## The Caveat
Now that we have seen how to implement CFI with MDI and MDA, we have to discuss the *caveat* of the normal ONC algorithm that was mentioned in Step 1 of this notebook.
To understand this caveat, we need to understand how ONC works. ONC finds the optimal number of clusters as well as the composition of those clusters, where each feature belongs to one and only one cluster. Features that belong to the same cluster share a large amount of information, and features that belong to different clusters share only a relatively small amount of information.
<br>The consistency of the cluster composition is determined by the [silhouette score](https://en.wikipedia.org/wiki/Silhouette_(clustering)) of the features. The silhouette ranges from −1 to +1, where a high value indicates that the object is well matched to its own cluster and poorly matched to neighboring clusters. So there may be some features with a low silhouette score, and this is a problem, because ONC cannot assign one feature to multiple clusters.
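For reference, the silhouette of a feature $i$ is defined as
$$S_{i} = \frac{b_{i} - a_{i}}{\max\{a_{i}, b_{i}\}}$$
where $a_{i}$ is the average distance between $i$ and the other features in its own cluster, and $b_{i}$ is the average distance between $i$ and the features in the nearest other cluster.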
<br>In this case, the following transformation may help reduce the multicollinearity of the system:
For each cluster $k = 1, \dots, K$, replace the features included in that cluster with residual features, so that they do not contain any information outside cluster $k$. That is, let $D_{k}$ be the subset of feature indices $D = \{1, \dots, F\}$ included in cluster $k$, where $D_{k} \subset D$, $\|D_{k}\| > 0$, $\forall k$; $D_{k} \cap D_{l} = \emptyset$, $\forall k \ne l$; $\bigcup_{k=1}^{K} D_{k} = D$. Then, for a given feature $X_{i}$ with $i \in D_{k}$, we compute the residual feature $\hat{\varepsilon}_{i}$ by fitting the following regression:
$$X_{n,i} = \alpha_{i} + \sum_{j \in \bigcup_{l<k} D_{l}} \beta_{i,j} X_{n,j} + \varepsilon_{n,i}$$
where $n = 1, \dots, N$ is the index of observations per feature. If the degrees of freedom in the above regression are too low, one option is to use as regressors linear combinations of the features within each cluster, following a minimum-variance weighting scheme, so that only $K-1$ betas need to be estimated.
This transformation is not necessary if the silhouette scores clearly indicate that features belong to their respective clusters.
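get_feature_clusters applies this correction automatically when it detects low-silhouette features (as we will see below). Purely for intuition, here is a minimal sketch of the residualization step using scikit-learn; it is not the library's actual implementation, and it assumes `X` is a DataFrame and `clusters_list` a list of feature-name lists.
```
# Sketch of the residualization described above: replace the features of cluster k by the
# residuals of a regression on the features of the previously processed clusters.
# Not mlfinlab's actual implementation.
from sklearn.linear_model import LinearRegression

def residualize_clusters(X, clusters_list):
    X_res = X.copy()
    previous_features = []
    for members in clusters_list:
        if previous_features:
            reg = LinearRegression().fit(X[previous_features], X[members])
            X_res[members] = X[members].values - reg.predict(X[previous_features])
        previous_features += list(members)
    return X_res
```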
```
corr0, clstrs, silh = get_onc_clusters(X.corr(), repeat=3)
plt.figure(figsize=(16,9))
sns.heatmap(corr0,cmap='viridis');
silh
```
As we can see, there is very low correlation among the clusters, hence we do not need to transform anything in this dataset. The silhouette scores confirm the same, as no features have a silhouette score below zero.
Now let us artificially generate a dataset that introduces features with a low silhouette score. Here the sigma argument of get_classification_data helps us generate a dataset with a strong substitution effect.
```
# We increase the value of sigma to introduce a strong substitution effect
X_, y_ = get_classification_data(n_features=40, n_informative=5, n_redundant=30, n_samples=1000, sigma=5)
# Now lets check if we obtained our desired dataset
corr0, clstrs, silh = get_onc_clusters(X_.corr())
clstrs
```
Now, let's see if there are any features with a low silhouette score. If yes, then we can correct them with the transformation mentioned above (the transformation is applied automatically).
```
# This function has a built-in detection mechanism that finds the features with a low silhouette score
# and corrects them with the transformation described above
clusters = get_feature_clusters(X_, dependence_metric=dep_matrix, distance_metric=None,
linkage_method=None, n_clusters=None)
```
We now have a dataset with some features that have a negative silhouette score. Because of this, all of the noisy features are placed within the informative and redundant feature clusters. **This is the caveat of the ONC algorithm.**
```
clusters
```
As we can see, the composition after the transformation has changed and we now have 3 clusters instead of 2. Though this is not perfect, it has done a much better job of clustering than the normal ONC algorithm. The get_feature_clusters function can also detect the problem of too few degrees of freedom in the regression model used for generating the residual $\hat{\varepsilon}_{i}$ that replaces the original feature $X_{i}$, as mentioned above.
## Using Hierarchical Clustering
```
dist_matrix = 'angular' # Angular distance metric
linkage = 'single' # Linkage method for hierarchical clustering
clusters_ = get_feature_clusters(X, dependence_metric=dep_matrix, distance_metric=dist_matrix,
linkage_method=linkage, n_clusters=None)
clusters_
```
Using hierarchical clustering we get 6 clusters, 3 of which contain only a single element, and these are the non-informative features.
## Conclusion
To conclude, the user can choose different dependence metrics, both correlation-based and information-theory-based (like Information Variation, which has the advantage of recognizing redundant features that are the result of nonlinear combinations of informative features). The user can also use different linkage methods for hierarchical clustering or define the number of clusters directly.
## References
* Paper: https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3517595
* Book: Machine Learning for Asset Managers by Dr. Marcos Lopez De Prado
| github_jupyter |
# Unsplash Joint Query Search
Using this notebook you can search for images from the [Unsplash Dataset](https://unsplash.com/data) using natural language queries. The search is powered by OpenAI's [CLIP](https://github.com/openai/CLIP) neural network.
This notebook uses the precomputed feature vectors for almost 2 million images from the full version of the [Unsplash Dataset](https://unsplash.com/data). If you want to compute the features yourself, see [here](https://github.com/haltakov/natural-language-image-search#on-your-machine).
This project was mostly based on the [project](https://github.com/haltakov/natural-language-image-search) created by [Vladimir Haltakov](https://twitter.com/haltakov) and the full code is open-sourced on [GitHub](https://github.com/haofanwang/natural-language-joint-query-search).
```
!git clone https://github.com/haofanwang/natural-language-joint-query-search.git
cd natural-language-joint-query-search
```
## Setup Environment
In this section we will setup the environment.
First we need to install CLIP and then upgrade the version of torch to 1.7.1 with CUDA support (by default CLIP installs torch 1.7.1 without CUDA). Google Colab currently has torch 1.7.0 which doesn't work well with CLIP.
```
!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 -f https://download.pytorch.org/whl/torch_stable.html
!pip install ftfy regex tqdm
```
## Download the Precomputed Data
In this section the precomputed feature vectors for all photos are downloaded.
In order to compare the photos from the Unsplash dataset to a text query, we need to compute the feature vector of each photo using CLIP.
We need to download two files:
* `photo_ids.csv` - a list of the photo IDs for all images in the dataset. The photo ID can be used to get the actual photo from Unsplash.
* `features.npy` - a matrix containing the precomputed 512 element feature vector for each photo in the dataset.
The files are available on [Google Drive](https://drive.google.com/drive/folders/1WQmedVCDIQKA2R33dkS1f980YsJXRZ-q?usp=sharing).
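If you do want to compute the features yourself, they are simply normalized CLIP image embeddings; below is a minimal sketch of how they could be generated (not the original project's script). It assumes a local folder of JPEG photos and uses the `model`, `preprocess` and `device` objects that are loaded further down in this notebook.
```
# Sketch: how the precomputed photo features could be generated with CLIP.
# Assumes a local photos/ folder and the model / preprocess / device objects loaded below.
import glob
import numpy as np
import torch
from PIL import Image

def encode_photos(photo_paths, batch_size=64):
    features = []
    with torch.no_grad():
        for i in range(0, len(photo_paths), batch_size):
            batch = torch.stack([preprocess(Image.open(p)) for p in photo_paths[i:i + batch_size]])
            encoded = model.encode_image(batch.to(device))
            features.append((encoded / encoded.norm(dim=-1, keepdim=True)).cpu().numpy())
    return np.concatenate(features)

# photo_paths = sorted(glob.glob("photos/*.jpg"))
# np.save("unsplash-dataset/features.npy", encode_photos(photo_paths))
```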
```
from pathlib import Path
# Create a folder for the precomputed features
!mkdir unsplash-dataset
# Download the photo IDs and the feature vectors
!gdown --id 1FdmDEzBQCf3OxqY9SbU-jLfH_yZ6UPSj -O unsplash-dataset/photo_ids.csv
!gdown --id 1L7ulhn4VeN-2aOM-fYmljza_TQok-j9F -O unsplash-dataset/features.npy
# Download from alternative source, if the download doesn't work for some reason (for example download quota limit exceeded)
if not Path('unsplash-dataset/photo_ids.csv').exists():
!wget https://transfer.army/api/download/TuWWFTe2spg/EDm6KBjc -O unsplash-dataset/photo_ids.csv
if not Path('unsplash-dataset/features.npy').exists():
!wget https://transfer.army/api/download/LGXAaiNnMLA/AamL9PpU -O unsplash-dataset/features.npy
```
## Define Functions
Some important functions from CLIP for processing the data are defined here.
The `encode_search_query` function takes a text description and encodes it into a feature vector using the CLIP model.
```
def encode_search_query(search_query):
with torch.no_grad():
# Encode and normalize the search query using CLIP
text_encoded, weight = model.encode_text(clip.tokenize(search_query).to(device))
text_encoded /= text_encoded.norm(dim=-1, keepdim=True)
# Retrieve the feature vector from the GPU and convert it to a numpy array
return text_encoded.cpu().numpy()
```
The `find_best_matches` function compares the text feature vector to the feature vectors of all images and finds the best matches. The function returns the IDs of the best matching photos.
```
def find_best_matches(text_features, photo_features, photo_ids, results_count=3):
# Compute the similarity between the search query and each photo using the Cosine similarity
similarities = (photo_features @ text_features.T).squeeze(1)
# Sort the photos by their similarity score
best_photo_idx = (-similarities).argsort()
# Return the photo IDs of the best matches
return [photo_ids[i] for i in best_photo_idx[:results_count]]
```
We can load the pretrained public CLIP model.
```
import torch
from CLIP.clip import clip
# Load the open CLIP model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device, jit=False)
```
We can now load the pre-extracted unsplash image features.
```
import pandas as pd
import numpy as np
# Load the photo IDs
photo_ids = pd.read_csv("unsplash-dataset/photo_ids.csv")
photo_ids = list(photo_ids['photo_id'])
# Load the features vectors
photo_features = np.load("unsplash-dataset/features.npy")
# Print some statistics
print(f"Photos loaded: {len(photo_ids)}")
```
## Search Unsplash
Now we are ready to search the dataset using natural language. Check out the examples below and feel free to try out your own queries.
In this project, we support more types of searching than the [original project](https://github.com/haltakov/natural-language-image-search).
1. Text-to-Image Search
2. Image-to-Image Search
3. Text+Text-to-Image Search
4. Image+Text-to-Image Search
Note:
1. As the Unsplash API limit is hit from time to time, we don't display the images, but show the links to download them.
2. As the pretrained CLIP model is mainly trained on English text, if you want to try a different language, please use the Google Translate API or an NMT model to translate the query first.
### Text-to-Image Search
#### "Tokyo Tower at night"
```
search_query = "Tokyo Tower at night."
text_features = encode_search_query(search_query)
# Find the best matches
best_photo_ids = find_best_matches(text_features, photo_features, photo_ids, 5)
for photo_id in best_photo_ids:
print("https://unsplash.com/photos/{}/download".format(photo_id))
```
#### "Two children are playing in the amusement park."
```
search_query = "Two children are playing in the amusement park."
text_features = encode_search_query(search_query)
# Find the best matches
best_photo_ids = find_best_matches(text_features, photo_features, photo_ids, 5)
for photo_id in best_photo_ids:
print("https://unsplash.com/photos/{}/download".format(photo_id))
```
### Image-to-Image Search
```
from PIL import Image
source_image = "./images/borna-hrzina-8IPrifbjo-0-unsplash.jpg"
with torch.no_grad():
image_feature = model.encode_image(preprocess(Image.open(source_image)).unsqueeze(0).to(device))
image_feature = (image_feature / image_feature.norm(dim=-1, keepdim=True)).cpu().numpy()
# Find the best matches
best_photo_ids = find_best_matches(image_feature, photo_features, photo_ids, 5)
for photo_id in best_photo_ids:
print("https://unsplash.com/photos/{}/download".format(photo_id))
```
### Text+Text-to-Image Search
```
search_query = "red flower"
search_query_extra = "blue sky"
text_features = encode_search_query(search_query)
text_features_extra = encode_search_query(search_query_extra)
mixed_features = text_features + text_features_extra
# Find the best matches
best_photo_ids = find_best_matches(mixed_features, photo_features, photo_ids, 5)
for photo_id in best_photo_ids:
print("https://unsplash.com/photos/{}/download".format(photo_id))
```
### Image+Text-to-Image Search
```
source_image = "./images/borna-hrzina-8IPrifbjo-0-unsplash.jpg"
search_text = "cars"
with torch.no_grad():
image_feature = model.encode_image(preprocess(Image.open(source_image)).unsqueeze(0).to(device))
image_feature = (image_feature / image_feature.norm(dim=-1, keepdim=True)).cpu().numpy()
text_feature = encode_search_query(search_text)
# image + text
modified_feature = image_feature + text_feature
best_photo_ids = find_best_matches(modified_feature, photo_features, photo_ids, 5)
for photo_id in best_photo_ids:
print("https://unsplash.com/photos/{}/download".format(photo_id))
```
| github_jupyter |
# RadarCOVID-Report
## Data Extraction
```
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
%matplotlib inline
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
```
### Constants
```
from Modules.ExposureNotification import exposure_notification_io
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
```
### Parameters
```
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
report_backend_identifier = environment_backend_identifier
else:
report_backend_identifier = default_backend_identifier
report_backend_identifier
environment_enable_multi_backend_download = \
os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
if environment_enable_multi_backend_download:
report_backend_identifiers = None
else:
report_backend_identifiers = [report_backend_identifier]
report_backend_identifiers
environment_invalid_shared_diagnoses_dates = \
os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
if environment_invalid_shared_diagnoses_dates:
invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",")
else:
invalid_shared_diagnoses_dates = []
invalid_shared_diagnoses_dates
```
### COVID-19 Cases
```
report_backend_client = \
exposure_notification_io.get_backend_client_with_identifier(
backend_identifier=report_backend_identifier)
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
confirmed_df_.iloc[0]
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
columns={
"date": "sample_date",
"iso_code": "country_code",
},
inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
try:
return pycountry.countries.get(alpha_3=x).alpha_2
except Exception as e:
logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
return None
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
confirmed_days = pd.date_range(
start=confirmed_df.iloc[0].sample_date,
end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
def sort_source_regions_for_display(source_regions: list) -> list:
if report_backend_identifier in source_regions:
source_regions = [report_backend_identifier] + \
list(sorted(set(source_regions).difference([report_backend_identifier])))
else:
source_regions = list(sorted(source_regions))
return source_regions
report_source_regions = report_backend_client.source_regions_for_date(
date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
source_regions=report_source_regions)
report_source_regions
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
source_regions_at_date_df = confirmed_days_df.copy()
source_regions_at_date_df["source_regions_at_date"] = \
source_regions_at_date_df.sample_date.apply(
lambda x: source_regions_for_date_function(date=x))
source_regions_at_date_df.sort_values("sample_date", inplace=True)
source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
source_regions_at_date_df.tail()
#%%
source_regions_for_summary_df_ = \
source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
source_regions_for_summary_df_.tail()
#%%
confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
for source_regions_group, source_regions_group_series in \
source_regions_at_date_df.groupby("_source_regions_group"):
source_regions_set = set(source_regions_group.split(","))
confirmed_source_regions_set_df = \
confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
confirmed_source_regions_group_df = \
confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
.reset_index().sort_values("sample_date")
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df.merge(
confirmed_days_df[["sample_date_string"]].rename(
columns={"sample_date_string": "sample_date"}),
how="right")
confirmed_source_regions_group_df["new_cases"] = \
confirmed_source_regions_group_df["new_cases"].clip(lower=0)
confirmed_source_regions_group_df["covid_cases"] = \
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[confirmed_output_columns]
confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[
confirmed_source_regions_group_df.sample_date.isin(
source_regions_group_series.sample_date_string)]
confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
result_df = confirmed_output_df.copy()
result_df.tail()
#%%
result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
result_df.sort_values("sample_date_string", inplace=True)
result_df.fillna(method="ffill", inplace=True)
result_df.tail()
#%%
result_df[["new_cases", "covid_cases"]].plot()
if columns_suffix:
result_df.rename(
columns={
"new_cases": "new_cases_" + columns_suffix,
"covid_cases": "covid_cases_" + columns_suffix},
inplace=True)
return result_df, source_regions_for_summary_df_
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
lambda date: [spain_region_country_code],
columns_suffix=spain_region_country_code.lower())
```
### Extract API TEKs
```
raw_zip_path_prefix = "Data/TEKs/Raw/"
base_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
exposure_notification_io.download_exposure_keys_from_backends(
backend_identifiers=report_backend_identifiers,
generation_days=backend_generation_days,
fail_on_error_backend_identifiers=base_backend_identifiers,
save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
columns={
"generation_datetime": "sample_datetime",
"generation_date_string": "sample_date_string",
},
inplace=True)
multi_backend_exposure_keys_df.head()
early_teks_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
"sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
active_regions = \
multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_backend_summary_df.rename(
columns={"key_data": "shared_teks_by_generation_date"},
inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
def compute_keys_cross_sharing(x):
teks_x = x.key_data_x.item()
common_teks = set(teks_x).intersection(x.key_data_y.item())
common_teks_fraction = len(common_teks) / len(teks_x)
return pd.Series(dict(
common_teks=common_teks,
common_teks_fraction=common_teks_fraction,
))
multi_backend_exposure_keys_by_region_df = \
multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_df.merge(
multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
columns=["_merge"], inplace=True)
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_combination_df[
multi_backend_exposure_keys_by_region_combination_df.region_x !=
multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
multi_backend_exposure_keys_by_region_combination_df \
.groupby(["region_x", "region_y"]) \
.apply(compute_keys_cross_sharing) \
.reset_index()
multi_backend_cross_sharing_summary_df = \
multi_backend_exposure_keys_cross_sharing_df.pivot_table(
values=["common_teks_fraction"],
columns="region_x",
index="region_y",
aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
multi_backend_without_active_region_exposure_keys_df = \
multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
exposure_keys_summary_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
```
### Dump API TEKs
```
tek_list_df = multi_backend_exposure_keys_df[
["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier]
tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
tek_list_current_path,
lines=True, orient="records")
tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json(
tek_list_daily_path,
lines=True, orient="records")
tek_list_base_df.to_json(
tek_list_hourly_path,
lines=True, orient="records")
tek_list_base_df.head()
```
### Load TEK Dumps
```
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
extracted_teks_df = pd.DataFrame(columns=["region"])
file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
if limit:
file_paths = file_paths[:limit]
for file_path in file_paths:
logging.info(f"Loading TEKs from '{file_path}'...")
iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
extracted_teks_df["region"] = \
extracted_teks_df.region.fillna(spain_region_country_code).copy()
if region:
extracted_teks_df = \
extracted_teks_df[extracted_teks_df.region == region]
return extracted_teks_df
daily_extracted_teks_df = load_extracted_teks(
mode="Daily",
region=report_backend_identifier,
limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
exposure_keys_summary_df_ = daily_extracted_teks_df \
.sort_values("extraction_date", ascending=False) \
.groupby("sample_date").tek_list.first() \
.to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
.rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
.sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
```
### Daily New TEKs
```
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
def compute_teks_by_generation_and_upload_date(date):
day_new_teks_set_df = tek_list_df.copy().diff()
try:
day_new_teks_set = day_new_teks_set_df[
day_new_teks_set_df.index == date].tek_list.item()
except ValueError:
day_new_teks_set = None
if pd.isna(day_new_teks_set):
day_new_teks_set = set()
day_new_teks_df = daily_extracted_teks_df[
daily_extracted_teks_df.extraction_date == date].copy()
day_new_teks_df["shared_teks"] = \
day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
day_new_teks_df["shared_teks"] = \
day_new_teks_df.shared_teks.apply(len)
day_new_teks_df["upload_date"] = date
day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
day_new_teks_df = day_new_teks_df[
["upload_date", "generation_date", "shared_teks"]]
day_new_teks_df["generation_to_upload_days"] = \
(pd.to_datetime(day_new_teks_df.upload_date) -
pd.to_datetime(day_new_teks_df.generation_date)).dt.days
day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
return day_new_teks_df
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
shared_teks_generation_to_upload_df = \
shared_teks_generation_to_upload_df.append(
compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
.sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
today_new_teks_df = \
shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
if not today_new_teks_df.empty:
today_new_teks_df.set_index("generation_to_upload_days") \
.sort_index().shared_teks.plot.bar()
generation_to_upload_period_pivot_df = \
shared_teks_generation_to_upload_df[
["upload_date", "generation_to_upload_days", "shared_teks"]] \
.pivot(index="upload_date", columns="generation_to_upload_days") \
.sort_index(ascending=False).fillna(0).astype(int) \
.droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "shared_teks_by_upload_date",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
[["upload_date", "shared_teks"]].rename(
columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_teks_uploaded_on_generation_date",
})
shared_teks_uploaded_on_generation_date_df.head()
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
.groupby(["upload_date"]).shared_teks.max().reset_index() \
.sort_values(["upload_date"], ascending=False) \
.rename(columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_diagnoses",
})
invalid_shared_diagnoses_dates_mask = \
estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
```
### Hourly New TEKs
```
hourly_extracted_teks_df = load_extracted_teks(
mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
hourly_new_tek_count_df = hourly_extracted_teks_df \
.groupby("extraction_date_with_hour").tek_list. \
apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
.sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
"new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
"extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
```
### Official Statistics
```
import requests
import pandas.io.json
official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
official_stats_df = official_stats_df_.copy()
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
official_stats_column_map = {
"date": "sample_date",
"applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
"communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
accumulated_values_columns = \
list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
official_stats_df = \
official_stats_df[official_stats_column_map.keys()] \
.rename(columns=official_stats_column_map)
official_stats_df["extraction_date"] = extraction_date
official_stats_df.head()
official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json"
previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True)
previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True)
official_stats_df = official_stats_df.append(previous_official_stats_df)
official_stats_df.head()
official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)]
official_stats_df.sort_values("extraction_date", ascending=False, inplace=True)
official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True)
official_stats_df.head()
official_stats_stored_df = official_stats_df.copy()
official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d")
official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True)
official_stats_df.drop(columns=["extraction_date"], inplace=True)
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
official_stats_df[accumulated_values_columns] = \
official_stats_df[accumulated_values_columns] \
.astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
```
### Data Merge
```
result_summary_df = exposure_keys_summary_df.merge(
new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
with pd.option_context("mode.use_inf_as_na", True):
result_summary_df = result_summary_df.fillna(0).astype(int)
result_summary_df["teks_per_shared_diagnosis"] = \
(result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case"] = \
(result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
def compute_aggregated_results_summary(days) -> pd.DataFrame:
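    # Rolling aggregation of the daily summary over a window of `days`.
    # covid_cases_for_ratio(_es) zero out case counts on days without shared
    # diagnoses, so the ratio denominators below only include days with uploads.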
aggregated_result_summary_df = result_summary_df.copy()
aggregated_result_summary_df["covid_cases_for_ratio"] = \
aggregated_result_summary_df.covid_cases.mask(
aggregated_result_summary_df.shared_diagnoses == 0, 0)
aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
aggregated_result_summary_df.covid_cases_es.mask(
aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
aggregated_result_summary_df = aggregated_result_summary_df \
.sort_index(ascending=True).fillna(0).rolling(days).agg({
"covid_cases": "sum",
"covid_cases_es": "sum",
"covid_cases_for_ratio": "sum",
"covid_cases_for_ratio_es": "sum",
"shared_teks_by_generation_date": "sum",
"shared_teks_by_upload_date": "sum",
"shared_diagnoses": "sum",
"shared_diagnoses_es": "sum",
}).sort_index(ascending=False)
with pd.option_context("mode.use_inf_as_na", True):
aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
(aggregated_result_summary_df.shared_teks_by_upload_date /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
(aggregated_result_summary_df.shared_diagnoses /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(aggregated_result_summary_df.shared_diagnoses_es /
aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
return aggregated_result_summary_df
aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
```
## Report Results
```
display_column_name_mapping = {
"sample_date": "Sample\u00A0Date\u00A0(UTC)",
"source_regions": "Source Countries",
"datetime_utc": "Timestamp (UTC)",
"upload_date": "Upload Date (UTC)",
"generation_to_upload_days": "Generation to Upload Period in Days",
"region": "Backend",
"region_x": "Backend\u00A0(A)",
"region_y": "Backend\u00A0(B)",
"common_teks": "Common TEKs Shared Between Backends",
"common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
"covid_cases": "COVID-19 Cases (Source Countries)",
"shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
"shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
"shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
"shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
"teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
"shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
"covid_cases_es": "COVID-19 Cases (Spain)",
"app_downloads_es": "App Downloads (Spain – Official)",
"shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
"shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
summary_columns = [
"covid_cases",
"shared_teks_by_generation_date",
"shared_teks_by_upload_date",
"shared_teks_uploaded_on_generation_date",
"shared_diagnoses",
"teks_per_shared_diagnosis",
"shared_diagnoses_per_covid_case",
"covid_cases_es",
"app_downloads_es",
"shared_diagnoses_es",
"shared_diagnoses_per_covid_case_es",
]
summary_percentage_columns= [
"shared_diagnoses_per_covid_case_es",
"shared_diagnoses_per_covid_case",
]
```
### Daily Summary Table
```
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
```
### Daily Summary Plots
```
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
.droplevel(level=["source_regions"]) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
title=f"Daily Summary",
rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
for percentage_column in summary_percentage_columns:
percentage_column_index = summary_columns.index(percentage_column)
summary_ax_list[percentage_column_index].yaxis \
.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
```
### Daily Generation to Upload Period Table
```
display_generation_to_upload_period_pivot_df = \
generation_to_upload_period_pivot_df \
.head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
.head(backend_generation_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping)
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
"Shared TEKs Generation to Upload Period Table")
sns.heatmap(
data=display_generation_to_upload_period_pivot_df
.rename_axis(columns=display_column_name_mapping)
.rename_axis(index=display_column_name_mapping),
fmt=".0f",
annot=True,
ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
```
### Hourly Summary Plots
```
hourly_summary_ax_list = hourly_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
```
### Publish Results
```
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
display_formatters = {
display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
general_columns = \
list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
.head(daily_plot_days) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
.head(daily_plot_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
if pd.isna(x):
return "-"
elif round(x * 100, 1) == 0:
return ""
else:
return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
display_brief_source_regions = report_source_regions[0]
else:
display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
import dataframe_image as dfi
df = df.copy()
df_styler = df.style.format(display_formatters)
media_path = get_temporary_image_path()
dfi.export(df_styler, media_path)
return media_path
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
```
### Save Results
```
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
```
### Publish Results as JSON
```
def generate_summary_api_results(df: pd.DataFrame) -> list:
api_df = df.reset_index().copy()
api_df["sample_date_string"] = \
api_df["sample_date"].dt.strftime("%Y-%m-%d")
api_df["source_regions"] = \
api_df["source_regions"].apply(lambda x: x.split(","))
return api_df.to_dict(orient="records")
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
last_14_days=last_14_days_summary,
daily_results=summary_api_results)
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
```
### Publish on README
```
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
```
### Publish on Twitter
```
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
(shared_teks_by_upload_date_last_hour or not are_today_results_partial):
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
def format_shared_diagnoses_per_covid_case(value) -> str:
if value == 0:
return "–"
return f"≤{value:.2%}"
display_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case)
display_last_14_days_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"])
display_last_14_days_shared_diagnoses_per_covid_case_es = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"])
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f}
- Usage Ratio: {display_shared_diagnoses_per_covid_case}
Last 14 Days:
- Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case}
- Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es}
Info: {github_project_base_url}#documentation
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
```
## 1. Adding Student Details
```
import time
import numpy as np
from json import loads, dumps
data = {}
history = {}
reg_no = str(input('Enter your registration no: '))
name = str(input('Name : '))
mail = str(input('Mail-ID : '))
phone = str(input('Phone No : '))
section = str(input('Section : '))
dct = {}
dct['name'] = name
dct['mail'] = mail
dct['phone'] = phone
dct['section'] = section
data[reg_no] = dct
data
```
## Saving student details in JSON file
```
from json import loads, dumps
type(data)
txt = dumps(data)
txt
fd = open('data.json','w')
fd.write(txt)
fd.close()
```
## Loading the data from JSON
```
fd = open('data.json','r')
txt = fd.read()
fd.close()
```
## Adding user details in JSON Directly
```
fd = open('data.json','r')
txt = fd.read()
fd.close()
data = loads(txt)
reg_no = str(input('Enter your registration no: '))
name = str(input('Name : '))
mail = str(input('Mail-ID : '))
phone = str(input('Phone No : '))
section = str(input('Section : '))
dct = {}
dct['name'] = name
dct['mail'] = mail
dct['phone'] = phone
dct['section'] = section
data[reg_no] = dct
txt = dumps(data)
fd = open('data.json','w')
fd.write(txt)
fd.close()
```
## Get User Details based on Reg No
```
fd = open('data.json','r')
txt = fd.read()
fd.close()
data = loads(txt)
user_reg = str(input('Enter the registration no: '))
print('-'*35)
print('Name : ', data[user_reg]['name'])
print('Mail : ', data[user_reg]['mail'])
print('Phone : ', data[user_reg]['phone'])
print('Section : ', data[user_reg]['section'])
print('-'*35)
```
## Get User Details based on Name
```
fd = open('data.json','r')
txt = fd.read()
fd.close()
data = loads(txt)
name = input('Enter the name: ')
for key in data.keys():
if(name.lower() == data[key]['name'].lower()):
print('-'*35)
print("Registration No : ", key)
print('Name : ', data[key]['name'])
print('Mail : ', data[key]['mail'])
print('Phone : ', data[key]['phone'])
print('Section : ', data[key]['section'])
print('-'*35)
```
## Saving Search History in JSON
```
fd = open('data.json','r')
txt = fd.read()
fd.close()
data = loads(txt)
name = input('Enter the name: ')
for key in data.keys():
if(name.lower() == data[key]['name'].lower()):
print('-'*35)
print("Registration No : ", key)
print('Name : ', data[key]['name'])
print('Mail : ', data[key]['mail'])
print('Phone : ', data[key]['phone'])
print('Section : ', data[key]['section'])
print('-'*35)
if (name in history.keys()):
history[name]['frequency'] += 1
history[name]['time'] = time.ctime()
else:
log = {}
log['time'] = time.ctime()
log['frequency'] = 1
history[name] = log
txt = dumps(history)
fd = open('History.json','w')
fd.write(txt)
fd.close()
```
```
import numpy as np
import matplotlib.pyplot as plt
```
# BCC and FCC
```
def average_quantities(E_list,V_list,S_list,Comp_list):
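    # Composition-weighted (rule-of-mixtures) averages of Young's modulus E,
    # shear modulus S and atomic volume V for each composition, plus the
    # derived Poisson ratio, Burgers vector and per-element misfit volumes
    # delta_Vn = V_n - V_avg used by the strengthening model below.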
average_E_list=np.empty(len(Comp_list))
average_S_list=np.empty(len(Comp_list))
average_V_list=np.empty(len(Comp_list))
average_b_list=np.empty(len(Comp_list))
average_nu_list=np.empty(len(Comp_list))
delta_Vn_list=np.empty([len(Comp_list),len(E_list)])
for i in range(len(Comp_list)):
c = Comp_list[i]
#print(c)
avg_E = np.dot(E_list,c)
avg_S = np.dot(S_list,c)
avg_nu = avg_E/(2*avg_S)-1
avg_V = np.dot(V_list,c)
delta_Vn = V_list-avg_V
avg_b = (4*avg_V)**(1/3)/(2**0.5)
average_E_list[i]=(avg_E)
average_S_list[i]=(avg_S)
average_V_list[i]=(avg_V)
average_b_list[i]=(avg_b)
average_nu_list[i]=(avg_nu)
delta_Vn_list[i,:]=(delta_Vn)
return average_E_list,average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list
def curtin_BCC(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep):
kc = 1.38064852*10**(-23) #J/K
J2eV=6.2415093433*10**18
ep0 = 10**4
aver_S = average_S_list
aver_b = average_b_list
sum_cndVn_b6_list = np.empty(len(Comp_list))
dEb_list=np.empty(len(Comp_list))
Ty0_list=np.empty(len(Comp_list))
delta_ss_list=np.empty(len(Comp_list))
for i in range(len(Comp_list)):
c = Comp_list[i]
#print(delta_Vn_list[i,:])
#print(delta_Vn_list[i,:]**2)
sum_cndVn_b6 = np.dot(c,delta_Vn_list[i,:]**2)/average_b_list[i]**6
#print(sum_cndVn_b6)
sum_cndVn_b6_list[i]=sum_cndVn_b6
q_nu = ((1 + average_nu_list)/(1 - average_nu_list))
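        # Energy barrier dEb and zero-temperature yield stress Ty0 of the
        # Curtin-type solute-strengthening model, built from the average shear
        # modulus, Burgers vector, Poisson-ratio factor q_nu and the misfit
        # parameter sum_cndVn_b6. Ty_T adds the temperature/strain-rate
        # dependence (switching to the exponential form once Ty_T drops below
        # Ty0/2), and delta_ss converts shear stress to uniaxial strength via
        # the Taylor factor 3.06.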
dEb = 2.00 * 0.123**(1/3) * aver_S * aver_b**3 * q_nu**(2/3) * sum_cndVn_b6**(1/3)
Ty0 = 0.040 * 0.123**(-1/3) * aver_S * q_nu**(4/3) * sum_cndVn_b6**(2/3)
Ty_T = Ty0 * (1 - ((kc*T)/(dEb) * np.log(ep0/ep))**(2/3) )
if Ty_T<=Ty0/2:
Ty_T = Ty0 * np.exp(-1/0.55* kc*T/dEb*np.log(ep0/ep))
delta_ss = 3.06*Ty_T
dEb_list[i]=dEb
Ty0_list[i]=Ty0
delta_ss_list[i]=delta_ss
return dEb_list, Ty0_list, delta_ss_list
def curtin_BCC_old(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep):
kc = 1.38064852*10**(-23) #J/K
J2eV=6.2415093433*10**18
ep0 = 10**4
aver_S = average_S_list
aver_b = average_b_list
sum_cndVn_b6_list = np.empty(len(Comp_list))
dEb_list=np.empty(len(Comp_list))
Ty0_list=np.empty(len(Comp_list))
delta_ss_list=np.empty(len(Comp_list))
for i in range(len(Comp_list)):
c = Comp_list[i]
#print(delta_Vn_list[i,:])
#print(delta_Vn_list[i,:]**2)
sum_cndVn_b6 = np.dot(c,delta_Vn_list[i,:]**2)/average_b_list[i]**6
#print(sum_cndVn_b6)
sum_cndVn_b6_list[i]=sum_cndVn_b6
q_nu = ((1 + average_nu_list)/(1 - average_nu_list))
dEb = 2.00 * 0.123**(1/3) * aver_S * aver_b**3 * q_nu**(2/3) * sum_cndVn_b6**(1/3)
Ty0 = 0.040 * 0.123**(-1/3) * aver_S * q_nu**(4/3) * sum_cndVn_b6**(2/3)
Ty_T = Ty0 * (1 - ((kc*T)/(dEb) * np.log(ep0/ep))**(2/3) )
delta_ss = 3.06*Ty_T
dEb_list[i]=dEb
Ty0_list[i]=Ty0
delta_ss_list[i]=delta_ss
return dEb_list, Ty0_list, delta_ss_list
# Mo-Ta-Nb
V_list=np.array([15.941,18.345,18.355])*1e-30
E_list=np.array([326.78,170.02,69.389])*1e9
S_list=np.array([126.4,62.8,24.2])*1e9
Comp_list = np.array([[0.75,0.,0.25]])
ep = 1e-3
T = 1573
average_E_list,average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list= average_quantities(E_list,V_list,S_list,Comp_list)
dEb_list, Ty0_list, delta_ss_list=curtin_BCC(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
dEb_list2, Ty0_list2, delta_ss_list2=curtin_BCC_old(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
T_list = np.linspace(0,1600,170)
dEb_list_comp0 = np.empty(len(T_list))
Ty0_list_comp0 = np.empty(len(T_list))
delta_ss_list_comp0 = np.empty(len(T_list))
dEb_list_comp0_old = np.empty(len(T_list))
Ty0_list_comp0_old = np.empty(len(T_list))
delta_ss_list_comp0_old = np.empty(len(T_list))
for i in range(len(T_list)):
T = T_list[i]
dEb_list, Ty0_list, delta_ss_list=curtin_BCC(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
dEb_list_comp0[i]=(dEb_list[0])
Ty0_list_comp0[i]=(Ty0_list[0])
delta_ss_list_comp0[i]=(delta_ss_list[0]/1e6)
dEb_list2, Ty0_list2, delta_ss_list2=curtin_BCC_old(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
dEb_list_comp0_old[i]=(dEb_list2[0])
Ty0_list_comp0_old[i]=(Ty0_list2[0])
delta_ss_list_comp0_old[i]=(delta_ss_list2[0]/1e6)
plt.plot(T_list,delta_ss_list_comp0)
plt.plot(T_list,delta_ss_list_comp0_old)
Comp_list = np.array([[0.1,0.00,0.9]])
average_E_list,average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list= average_quantities(E_list,V_list,S_list,Comp_list)
dEb_list, Ty0_list, delta_ss_list=curtin_BCC(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
T_list = np.linspace(0,1600,170)
dEb_list_comp0 = np.empty(len(T_list))
Ty0_list_comp0 = np.empty(len(T_list))
delta_ss_list_comp0 = np.empty(len(T_list))
dEb_list_comp0_old = np.empty(len(T_list))
Ty0_list_comp0_old = np.empty(len(T_list))
delta_ss_list_comp0_old = np.empty(len(T_list))
for i in range(len(T_list)):
T = T_list[i]
dEb_list, Ty0_list, delta_ss_list=curtin_BCC(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
dEb_list_comp0[i]=(dEb_list[0])
Ty0_list_comp0[i]=(Ty0_list[0])
delta_ss_list_comp0[i]=(delta_ss_list[0]/1e6)
dEb_list2, Ty0_list2, delta_ss_list2=curtin_BCC_old(average_S_list,average_V_list,average_b_list,average_nu_list,delta_Vn_list,Comp_list,T,ep)
dEb_list_comp0_old[i]=(dEb_list2[0])
Ty0_list_comp0_old[i]=(Ty0_list2[0])
delta_ss_list_comp0_old[i]=(delta_ss_list2[0]/1e6)
plt.plot(T_list,delta_ss_list_comp0)
plt.plot(T_list,delta_ss_list_comp0_old)
```
# Implement an Accelerometer
In this notebook you will define your own `get_derivative_from_data` function and use it to differentiate position data ONCE to get velocity information and then again to get acceleration information.
In part 1 I will demonstrate what this process looks like and then in part 2 you'll implement the function yourself.
-----
## Part 1 - Reminder and Demonstration
```
# run this cell for required imports
from helpers import process_data
from helpers import get_derivative_from_data as solution_derivative
from matplotlib import pyplot as plt
# load the parallel park data
PARALLEL_PARK_DATA = process_data("parallel_park.pickle")
# get the relevant columns
timestamps = [row[0] for row in PARALLEL_PARK_DATA]
displacements = [row[1] for row in PARALLEL_PARK_DATA]
# calculate first derivative
speeds = solution_derivative(displacements, timestamps)
# plot
plt.title("Position and Velocity vs Time")
plt.xlabel("Time (seconds)")
plt.ylabel("Position (blue) and Speed (orange)")
plt.scatter(timestamps, displacements)
plt.scatter(timestamps[1:], speeds)
plt.show()
```
But you just saw that acceleration is the derivative of velocity... which means we can use the same derivative function to calculate acceleration!
```
# calculate SECOND derivative
accelerations = solution_derivative(speeds, timestamps[1:])
# plot (note the slicing of timestamps from 2 --> end)
plt.scatter(timestamps[2:], accelerations)
plt.show()
```
As you can see, this parallel park motion consisted of four segments with different (but constant) acceleration. We can plot all three quantities at once like this:
```
plt.title("x(t), v(t), a(t)")
plt.xlabel("Time (seconds)")
plt.ylabel("x (blue), v (orange), a (green)")
plt.scatter(timestamps, displacements)
plt.scatter(timestamps[1:], speeds)
plt.scatter(timestamps[2:], accelerations)
plt.show()
```
----
## Part 2 - Implement it yourself!
```
def get_derivative_from_data(position_data, time_data):
# TODO - try your best to implement this code yourself!
# if you get really stuck feel free to go back
# to the previous notebook for a hint.
return
# Testing part 1 - visual testing of first derivative
# compare this output to the corresponding graph above.
speeds = get_derivative_from_data(displacements, timestamps)
plt.title("Position and Velocity vs Time")
plt.xlabel("Time (seconds)")
plt.ylabel("Position (blue) and Speed (orange)")
plt.scatter(timestamps, displacements)
plt.scatter(timestamps[1:], speeds)
plt.show()
# Testing part 2 - visual testing of second derivative
# compare this output to the corresponding graph above.
speeds = get_derivative_from_data(displacements, timestamps)
accelerations = get_derivative_from_data(speeds, timestamps[1:])
plt.title("x(t), v(t), a(t)")
plt.xlabel("Time (seconds)")
plt.ylabel("x (blue), v (orange), a (green)")
plt.scatter(timestamps, displacements)
plt.scatter(timestamps[1:], speeds)
plt.scatter(timestamps[2:], accelerations)
plt.show()
```
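If you get stuck, below is one possible finite-difference sketch (not necessarily identical to the reference implementation in `helpers`). It returns one fewer value than its inputs, which matches the `timestamps[1:]` and `timestamps[2:]` slicing used in the plots above.
```
def get_derivative_from_data_sketch(position_data, time_data):
    """Approximate the derivative with forward differences between consecutive samples."""
    derivative = []
    for i in range(1, len(position_data)):
        delta_x = position_data[i] - position_data[i - 1]
        delta_t = time_data[i] - time_data[i - 1]
        derivative.append(delta_x / delta_t)
    return derivative

# quick sanity check: constant speed of 2.0 units per second
print(get_derivative_from_data_sketch([0, 2, 4, 6], [0, 1, 2, 3]))  # [2.0, 2.0, 2.0]
```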
# Weyl Scalars and Invariants: An Introduction to Einstein Toolkit Diagnostic Thorns
## Author: Patrick Nelson & Zach Etienne
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
**Notebook Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** Numerical results from this module have been confirmed to agree with the trusted WeylScal4 Einstein Toolkit thorn to roundoff error.
### NRPy+ Source Code for this module:
* [WeylScal4NRPD/WeylScalars_Cartesian.py](../edit/WeylScal4NRPD/WeylScalars_Cartesian.py)
* [WeylScal4NRPD/WeylScalarInvariants_Cartesian.py](../edit/WeylScal4NRPD/WeylScalarInvariants_Cartesian.py)
which are fully documented in the NRPy+ [Tutorial-WeylScalars-Cartesian](Tutorial-WeylScalars-Cartesian.ipynb) module on using NRPy+ to construct the Weyl scalars and invariants as SymPy expressions.
## Introduction:
In the [previous tutorial notebook](Tutorial-WeylScalars-Cartesian.ipynb), we constructed within SymPy full expressions for the real and imaginary components of all five Weyl scalars $\psi_0$, $\psi_1$, $\psi_2$, $\psi_3$, and $\psi_4$ as well as the Weyl invariants. So that we can easily access these expressions, we have ported the Python code needed to generate the Weyl scalar SymPy expressions to [WeylScal4NRPD/WeylScalars_Cartesian.py](../edit/WeylScal4NRPD/WeylScalars_Cartesian.py), and the Weyl invariant SymPy expressions to [WeylScal4NRPD/WeylScalarInvariants_Cartesian.py](../edit/WeylScal4NRPD/WeylScalarInvariants_Cartesian.py).
Here we will work through the steps necessary to construct an Einstein Toolkit diagnostic thorn (module), starting from these SymPy expressions, which computes these expressions using ADMBase gridfunctions as input. This tutorial is in two steps:
1. Call on NRPy+ to convert the SymPy expressions for the Weyl Scalars and associated Invariants into one C-code kernel for each.
1. Write the C code and build up the needed Einstein Toolkit infrastructure (i.e., the .ccl files).
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#nrpy): Call on NRPy+ to convert the SymPy expressions for the Weyl scalars and associated invariants into one C-code kernel for each
1. [Step 2](#etk): Interfacing with the Einstein Toolkit
1. [Step 2.a](#etkc): Constructing the Einstein Toolkit C-code calling functions that include the C code kernels
1. [Step 2.b](#cclfiles): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure
1. [Step 2.c](#etk_list): Add the C file to Einstein Toolkit compilation list
1. [Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='nrpy'></a>
# Step 1: Call on NRPy+ to convert the SymPy expressions for the Weyl scalars and associated invariants into one C-code kernel for each \[Back to [top](#toc)\]
$$\label{nrpy}$$
<font color='red'><b>WARNING</b></font>: It takes some time to generate the CSE-optimized C code kernels for these quantities, especially the Weyl scalars... expect 5 minutes on a modern computer.
```
from outputC import * # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import loop as lp                  # NRPy+: loop infrastructure
import shutil, os, sys, time # Standard Python modules for multiplatform OS-level functions, benchmarking
# Step 1: Set the coordinate system for the numerical grid to Cartesian.
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
# Step 2: Set the finite differencing order to FD_order to 4
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 4)
# Step 3: Create output directories
!mkdir WeylScal4NRPD 2>/dev/null # 2>/dev/null: Don't throw an error or warning if the directory already exists.
!mkdir WeylScal4NRPD/src 2>/dev/null # 2>/dev/null: Don't throw an error or warning if the directory already exists.
# Step 4: Generate symbolic expressions
# Since we are writing an Einstein Toolkit thorn, we must set our memory access style to "ETK".
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")
import BSSN.Psi4_tetrads as BP4t
par.set_parval_from_str("BSSN.Psi4_tetrads::TetradChoice","QuasiKinnersley")
#par.set_parval_from_str("BSSN.Psi4_tetrads::UseCorrectUnitNormal","True")
import BSSN.Psi4 as BP4
print("Generating symbolic expressions for psi4...")
start = time.time()
BP4.Psi4()
end = time.time()
print("(BENCH) Finished psi4 symbolic expressions in "+str(end-start)+" seconds.")
psi4r = gri.register_gridfunctions("AUX","psi4r")
psi4r0pt = gri.register_gridfunctions("AUX","psi4r0pt")
psi4r1pt = gri.register_gridfunctions("AUX","psi4r1pt")
psi4r2pt = gri.register_gridfunctions("AUX","psi4r2pt")
# Construct RHSs:
psi4r_lhrh = [lhrh(lhs=gri.gfaccess("out_gfs","psi4r"),rhs=BP4.psi4_re_pt[0]+BP4.psi4_re_pt[1]+BP4.psi4_re_pt[2]),
lhrh(lhs=gri.gfaccess("out_gfs","psi4r0pt"),rhs=BP4.psi4_re_pt[0]),
lhrh(lhs=gri.gfaccess("out_gfs","psi4r1pt"),rhs=BP4.psi4_re_pt[1]),
lhrh(lhs=gri.gfaccess("out_gfs","psi4r2pt"),rhs=BP4.psi4_re_pt[2])]
# Generating the CSE is the slowest
# operation in this notebook, and much of the CSE
# time is spent sorting CSE expressions. Disabling
# this sorting makes the C codegen 3-4x faster,
# but the tradeoff is that every time this is
# run, the CSE patterns will be different
# (though they should result in mathematically
# *identical* expressions). You can expect
# roundoff-level differences as a result.
start = time.time()
print("Generating C code kernel for psi4r...")
psi4r_CcodeKernel = fin.FD_outputC("returnstring",psi4r_lhrh,params="outCverbose=False,CSE_sorting=none")
end = time.time()
print("(BENCH) Finished psi4r C code kernel generation in "+str(end-start)+" seconds.")
psi4r_looped = lp.loop(["i2","i1","i0"],["2","2","2"],["cctk_lsh[2]-2","cctk_lsh[1]-2","cctk_lsh[0]-2"],\
["1","1","1"],["#pragma omp parallel for","",""],"","""
const CCTK_REAL xx0 = xGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];
const CCTK_REAL xx1 = yGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];
const CCTK_REAL xx2 = zGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];
"""+psi4r_CcodeKernel)
with open("WeylScal4NRPD/src/WeylScal4NRPD_psi4r.h", "w") as file:
file.write(str(psi4r_looped))
```
<a id='etk'></a>
# Step 2: Interfacing with the Einstein Toolkit \[Back to [top](#toc)\]
$$\label{etk}$$
<a id='etkc'></a>
## Step 2.a: Constructing the Einstein Toolkit calling functions that include the C code kernels \[Back to [top](#toc)\]
$$\label{etkc}$$
Now that we have generated the C code kernels (`WeylScal4NRPD_psis.h` and `WeylScal4NRPD_invars.h`) that express the Weyl scalars and invariants as CSE-optimized finite-difference expressions, we next need to write the C code functions that incorporate these kernels and are called by the Einstein Toolkit scheduler.
```
%%writefile WeylScal4NRPD/src/WeylScal4NRPD.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void WeylScal4NRPD_calc_psi4r(const cGH* restrict const cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,
const CCTK_REAL invdx0,const CCTK_REAL invdx1,const CCTK_REAL invdx2,
const CCTK_REAL *xGF,const CCTK_REAL *yGF,const CCTK_REAL *zGF,
const CCTK_REAL *hDD00GF,const CCTK_REAL *hDD01GF,const CCTK_REAL *hDD02GF,const CCTK_REAL *hDD11GF,const CCTK_REAL *hDD12GF,const CCTK_REAL *hDD22GF,
const CCTK_REAL *aDD00GF,const CCTK_REAL *aDD01GF,const CCTK_REAL *aDD02GF,const CCTK_REAL *aDD11GF,const CCTK_REAL *aDD12GF,const CCTK_REAL *aDD22GF,
const CCTK_REAL *trKGF,const CCTK_REAL *cfGF,
CCTK_REAL *psi4rGF,
CCTK_REAL *psi4r0ptGF,
CCTK_REAL *psi4r1ptGF,
CCTK_REAL *psi4r2ptGF) {
DECLARE_CCTK_PARAMETERS;
#include "WeylScal4NRPD_psi4r.h"
}
extern void WeylScal4NRPD_mainfunction(CCTK_ARGUMENTS) {
DECLARE_CCTK_PARAMETERS;
DECLARE_CCTK_ARGUMENTS;
if(cctk_iteration % WeylScal4NRPD_calc_every != 0) { return; }
const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));
const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));
const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));
/* Now, to calculate psi4: */
WeylScal4NRPD_calc_psi4r(cctkGH,cctk_lsh,cctk_nghostzones,
invdx0,invdx1,invdx2,
x,y,z,
hDD00GF,hDD01GF,hDD02GF,hDD11GF,hDD12GF,hDD22GF,
aDD00GF,aDD01GF,aDD02GF,aDD11GF,aDD12GF,aDD22GF,
trKGF,cfGF,
psi4rGF,
psi4r0ptGF,psi4r1ptGF,psi4r2ptGF);
}
# First we convert from ADM to BSSN, as is required to convert initial data
# (given using ADM quantities) to the BSSN evolved variables
import BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear as atob
IDhDD,IDaDD,IDtrK,IDvetU,IDbetU,IDalpha,IDcf,IDlambdaU = \
atob.Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear("Cartesian","DoNotOutputADMInputFunction",os.path.join("WeylScal4NRPD","src"))
# Store the original list of registered gridfunctions; we'll want to unregister
# all the *SphorCart* gridfunctions after we're finished with them below.
orig_glb_gridfcs_list = []
for gf in gri.glb_gridfcs_list:
orig_glb_gridfcs_list.append(gf)
alphaSphorCart = gri.register_gridfunctions( "AUXEVOL", "alphaSphorCart")
betaSphorCartU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL", "betaSphorCartU")
BSphorCartU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL", "BSphorCartU")
gammaSphorCartDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL", "gammaSphorCartDD", "sym01")
KSphorCartDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL", "KSphorCartDD", "sym01")
# ADM to BSSN conversion, used for converting ADM initial data into a form readable by this thorn.
# ADM to BSSN, Part 1: Set up function call and pointers to ADM gridfunctions
outstr = """
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void WeylScal4NRPD_ADM_to_BSSN(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
CCTK_REAL *alphaSphorCartGF = alp;
"""
# It's ugly if we output code in the following ordering, so we'll first
# output to a string and then sort the string to beautify the code a bit.
outstrtmp = []
for i in range(3):
outstrtmp.append(" CCTK_REAL *betaSphorCartU"+str(i)+"GF = beta"+chr(ord('x')+i)+";\n")
# outstrtmp.append(" CCTK_REAL *BSphorCartU"+str(i)+"GF = dtbeta"+chr(ord('x')+i)+";\n")
for j in range(i,3):
outstrtmp.append(" CCTK_REAL *gammaSphorCartDD"+str(i)+str(j)+"GF = g"+chr(ord('x')+i)+chr(ord('x')+j)+";\n")
outstrtmp.append(" CCTK_REAL *KSphorCartDD"+str(i)+str(j)+"GF = k"+chr(ord('x')+i)+chr(ord('x')+j)+";\n")
outstrtmp.sort()
for line in outstrtmp:
outstr += line
# ADM to BSSN, Part 2: Set up ADM to BSSN conversions for BSSN gridfunctions that do not require
# finite-difference derivatives (i.e., all gridfunctions except lambda^i (=Gamma^i
# in non-covariant BSSN)):
# h_{ij}, a_{ij}, trK, vet^i=beta^i,bet^i=B^i, cf (conformal factor), and alpha
all_but_lambdaU_expressions = [
lhrh(lhs=gri.gfaccess("in_gfs","hDD00"),rhs=IDhDD[0][0]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD01"),rhs=IDhDD[0][1]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD02"),rhs=IDhDD[0][2]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD11"),rhs=IDhDD[1][1]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD12"),rhs=IDhDD[1][2]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD22"),rhs=IDhDD[2][2]),
lhrh(lhs=gri.gfaccess("in_gfs","aDD00"),rhs=IDaDD[0][0]),
lhrh(lhs=gri.gfaccess("in_gfs","aDD01"),rhs=IDaDD[0][1]),
lhrh(lhs=gri.gfaccess("in_gfs","aDD02"),rhs=IDaDD[0][2]),
lhrh(lhs=gri.gfaccess("in_gfs","aDD11"),rhs=IDaDD[1][1]),
lhrh(lhs=gri.gfaccess("in_gfs","aDD12"),rhs=IDaDD[1][2]),
lhrh(lhs=gri.gfaccess("in_gfs","aDD22"),rhs=IDaDD[2][2]),
lhrh(lhs=gri.gfaccess("in_gfs","trK"),rhs=IDtrK),
lhrh(lhs=gri.gfaccess("in_gfs","vetU0"),rhs=IDvetU[0]),
lhrh(lhs=gri.gfaccess("in_gfs","vetU1"),rhs=IDvetU[1]),
lhrh(lhs=gri.gfaccess("in_gfs","vetU2"),rhs=IDvetU[2]),
lhrh(lhs=gri.gfaccess("in_gfs","alpha"),rhs=IDalpha),
lhrh(lhs=gri.gfaccess("in_gfs","cf"),rhs=IDcf)]
outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
all_but_lambdaU_outC = fin.FD_outputC("returnstring",all_but_lambdaU_expressions, outCparams)
outstr += lp.loop(["i2","i1","i0"],["0","0","0"],["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],
["1","1","1"],["#pragma omp parallel for","",""]," ",all_but_lambdaU_outC)
outstr += "} // END void WeylScal4NRPD_ADM_to_BSSN(CCTK_ARGUMENTS)\n"
with open("WeylScal4NRPD/src/ADM_to_BSSN.c", "w") as file:
file.write(str(outstr))
```
<a id='cclfiles'></a>
## Step 2.b: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \[Back to [top](#toc)\]
$$\label{cclfiles}$$
Writing a module ("thorn") within the Einstein Toolkit requires that three "ccl" files be constructed, all in the root directory of the thorn:
1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns.
1. `param.ccl`: specifies free parameters within the thorn.
1. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions.
Let's start with `interface.ccl`. The [official Einstein Toolkit (Cactus) documentation](http://einsteintoolkit.org/usersguide/UsersGuide.html) defines what must/should be included in an `interface.ccl` file [**here**](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-178000D2.2).
```
%%writefile WeylScal4NRPD/interface.ccl
# With "implements", we give our thorn its unique name.
implements: WeylScal4NRPD
# By "inheriting" other thorns, we tell the Toolkit that we
# will rely on variables/function that exist within those
# functions.
inherits: admbase Boundary Grid methodoflines
# Tell the Toolkit that we want the various Weyl scalars
# and invariants to be visible to other thorns by using
# the keyword "public". Note that declaring these
# gridfunctions *does not* allocate memory for them;
# that is done by the schedule.ccl file.
public:
CCTK_REAL NRPyPsi4_group type=GF timelevels=3 tags='tensortypealias="Scalar" tensorweight=0 tensorparity=1'
{
psi4rGF,psi4r0ptGF,psi4r1ptGF,psi4r2ptGF, psi4iGF
} "Psi4_group"
CCTK_REAL evol_variables type = GF Timelevels=3
{
aDD00GF,aDD01GF,aDD02GF,aDD11GF,aDD12GF,aDD22GF,alphaGF,cfGF,hDD00GF,hDD01GF,hDD02GF,hDD11GF,hDD12GF,hDD22GF,trKGF,vetU0GF,vetU1GF,vetU2GF
} "BSSN evolved gridfunctions, sans lambdaU and partial t beta"
```
We will now write the file `param.ccl`. This file allows the listed parameters to be set at runtime. We also give allowed ranges and default values for each parameter. More information on this file's syntax can be found in the [official Einstein Toolkit documentation](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-183000D2.3).
The first parameter specifies how many time levels need to be stored. Generally when using the ETK's adaptive-mesh refinement (AMR) driver [Carpet](https://carpetcode.org/), three timelevels are needed so that the diagnostic quantities can be properly interpolated and defined across refinement boundaries.
The second parameter determines how often we will calculate $\psi_4$, and the third parameter indicates whether just $\psi_4$, all Weyl scalars, or all Weyl scalars and invariants are going to be output. The third parameter is currently specified entirely within NRPy+, so by this point it is *not* a free parameter. Thus it is not quite correct to include it in this list of *free* parameters (FIXME).
```
%%writefile WeylScal4NRPD/param.ccl
restricted:
CCTK_INT timelevels "Number of active timelevels" STEERABLE=RECOVER
{
0:3 :: ""
} 3
restricted:
CCTK_INT WeylScal4NRPD_calc_every "WeylScal4_psi4_calc_Nth_calc_every" STEERABLE=ALWAYS
{
*:* :: ""
} 1
```
Finally, we will write the file `schedule.ccl`; its official documentation is found [here](http://einsteintoolkit.org/usersguide/UsersGuidech12.html#x17-186000D2.4). This file dictates when the various parts of the thorn will be run. We first assign storage for both the real and imaginary components of $\psi_4$, and then specify that we want our code run in the `MoL_PseudoEvolution` schedule group (consistent with the original `WeylScal4` Einstein Toolkit thorn), after the ADM variables are set. At this step, we declare that we will be writing code in C. We also specify the gridfunctions that we wish to read in from memory--in our case, we need all the components of $K_{ij}$ (the spatial extrinsic curvature) and $\gamma_{ij}$ (the physical [as opposed to conformal] 3-metric), in addition to the coordinate values. Note that the ETK adopts the widely-used convention that components of $\gamma_{ij}$ are prefixed in the code with $\text{g}$ and not $\gamma$.
```
%%writefile WeylScal4NRPD/schedule.ccl
STORAGE: NRPyPsi4_group[3], evol_variables[3]
STORAGE: ADMBase::metric[3], ADMBase::curv[3], ADMBase::lapse[3], ADMBase::shift[3]
schedule group WeylScal4NRPD_group in MoL_PseudoEvolution after ADMBase_SetADMVars
{
} "Schedule WeylScal4NRPD group"
schedule WeylScal4NRPD_ADM_to_BSSN in WeylScal4NRPD_group before weylscal4_mainfunction
{
LANG: C
} "Convert ADM into BSSN variables"
schedule WeylScal4NRPD_mainfunction in WeylScal4NRPD_group after WeylScal4NRPD_ADM_to_BSSN
{
LANG: C
} "Call WeylScal4NRPD main function"
```
<a id='etk_list'></a>
## Step 2.c: Tell the Einstein Toolkit to compile the C code \[Back to [top](#toc)\]
$$\label{etk_list}$$
The `make.code.defn` file lists the source files that need to be compiled. This thorn has two C files $-$ written above $-$ to compile:
```
%%writefile WeylScal4NRPD/src/make.code.defn
SRCS = WeylScal4NRPD.c ADM_to_BSSN.c
```
<a id='latex_pdf_output'></a>
# Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-ETK_thorn-Weyl_Scalars_and_Spacetime_Invariants.pdf](Tutorial-ETK_thorn-Weyl_Scalars_and_Spacetime_Invariants.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ETK_thorn-WeylScal4NRPD")
```
# Creating EEG Objects
## Epoch Creation
<a id="intro"></a>
```
from simpl_eeg import eeg_objects
```
<br>
### Module Overview
The `eeg_objects` module contains helper classes for storing and manipulating relevant information regarding epochs to pass to other package functions. It contains two classes. Typically you will only use `eeg_objects.Epochs` directly, which by default contains an `eeg_objects.EEG_File` object in its `eeg_file` attribute.
Below are the docstrings for the two classes:
```
# Class for reading and importing EEG files
help(eeg_objects.EEG_File)
# Class for storing, generating, and adjusting epoch objects
help(eeg_objects.Epochs)
```
<br>
### Define parameters
The only required parameter to create an epoch object is the `folder_path` for the experiment of interest, however additional parameters may be used to customize your epoch object.
- `file_name`
- If you specify a `file_name`, and the file exists in the `folder_path` directory, then it will be used as the main data file for the epoch.
- If you do not specify a `file_name` then the alphabetical first file with a supported main file type in `folder_path` will be automatically loaded.
- `events_file`
- If you specify an `events_file`, and the file exists in the `folder_path` directory, then it will be used as the events data file for the epoch.
- If you do not specify an `events_file` then the alphabetical first file with a supported events file type in `folder_path` will be automatically loaded.
- If you try to load an `events_file` (automatically or manually) with over 5,000 events or if the final column in the loaded dictionary does not contain a numerical value in its first index (both forms of error catching) then the file will be rejected and will not be loaded.
- If you want to force no events data to be loaded, you can pass an `events_file` of `None`.
- `montage`
- If you specify a `montage`, it will load a standard montage with the specified name into the epoch data.
- If montage data already exists in the main data file and a `montage` is provided, the original montage data is overwritten in the epoch object.
- If you do not specify a `montage` and montage data already exists in the main data then it will be used instead.
- If you do not specify a `montage` and montage data does not exist in the main data, then one attempt will be made to load an "easycap-M1" montage. If this fails, then no montage information will be loaded.
- If you want to force no montage data to be loaded, you can pass a `montage` of `None`.
- `start_second`
- If you specify a `start_second`, a single epoch will be generated with an impact event at the specified second.
- If you do not specify a `start_second`, epochs will be automatically generated using the impact times found in the `impact locations.mat` file in the selected `experiment_folder`.
- `tmin`
- specifies the number of seconds before the impact to use.
- `tmax`
- specifies the number of seconds after the impact.
```
# path to the experiment folder
folder_path = "../../data/109"
# the name of the main data file to load (optional)
file_name = "fixica.set"
# the name of the events file to load (optional)
events_file = "impact locations.mat"
# the montage type to load (optional)
montage = None
# number of seconds before the impact, should be a negative number for before impact (optional)
tmin = -1
# number of seconds after the impact (optional)
tmax = 1
# if creating a custom epoch, select a starting second (optional)
start_second = None
```
<br>
### Create epoched data
The following data formats are currently supported. Note that due to limited availability of test files not all formats have been fully tested (see Notes).
| | Main File | Secondary File | Events File | Notes |
|-----------------------|-----------|----------------|-------------|---------------------------------------------------------|
| EEGLAB | .set | .fdt | .mat | |
| BrainVision | .vhdr | .eeg | .vmrk | |
| European data format | .edf | N/A | N/A | |
| BioSemi data format   | .bdf      | N/A            | N/A         | Montage has not been successfully loaded with test files. |
| General data format   | .gdf      | N/A            | N/A         | Events have not been successfully loaded with test files. |
| Neuroscan CNT         | .cnt      | N/A            | N/A         | Montage has not been successfully loaded with test files. |
| eXimia                | .nxe      | N/A            | N/A         | Events have not been successfully loaded with test files. |
| Nihon Kohden EEG data | .eeg      | .pnt AND .21e  | .log        | Montage has not been successfully loaded with test files. |
- A **main file** represents the lead file used to load in your EEG data. This is the file that may be passed as your `file_name`.
- A **secondary file** contains some secondary information for some data types. It will be automatically loaded when the main file is loaded.
- An **events file** contains a list of the annotations associated with events in your EEG data. This is the file that may be passed as your `events_file`.
- A **montage** must exist in your epoch in order to visualize it. This contains information about your node locations in 3D space. A complete list of usable montages is available here: https://mne.tools/dev/generated/mne.channels.make_standard_montage.html.
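If you would rather check the available standard montage names programmatically than browse the documentation link above, a minimal sketch using MNE directly (assuming `mne` is available in your environment) is:
```
import mne

# Names in this list are valid values for the `montage` parameter
print(mne.channels.get_builtin_montages())
```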
You can create epoched data using the `Epochs` class.
```
epochs = eeg_objects.Epochs(
folder_path = folder_path,
file_name = file_name,
events_file = events_file,
montage = montage,
tmin = tmin,
tmax = tmax,
start_second = start_second
)
```
The generated epoch data is found within the `all_epochs` attribute. Here we are generating epochs with automatically detected impact times, so we can see that there are multiple events.
```
epochs.all_epochs
```
If instead we create epochs with a custom start second, we will only create a single epoch with an impact at the given `start_second`.
```
start_second = 15 # record event at second 15
custom_epoch = eeg_objects.Epochs(folder_path, tmin=tmin, tmax=tmax, start_second=start_second)
custom_epoch.all_epochs
```
#### Get information about epochs
In addition to the epochs contained in the `all_epochs` attribute, the `Epochs` object also contains information about the file used and has a selected epoch for quick access.
```
eeg_file = epochs.eeg_file
print(eeg_file.folder_path) # experiment folder path
print(eeg_file.experiment) # experiment number
print(eeg_file.raw) # raw data
print(eeg_file.file_source) # primary data file the EEG data was loaded from
print(eeg_file.events_source) # source file of events
print(eeg_file.montage_source) # source of the montage (may be pre-set montage name)
print(eeg_file.events) # impact times
```
#### Select specific epoch
If you have a specific epoch of interest you can specify it with the `get_epoch` method. You can retrieve it later by accessing the `epoch` attribute.
```
nth_epoch = 5 # the epoch of interest to select, the 6th impact
single_epoch = epochs.get_epoch(nth_epoch)
single_epoch
epochs.epoch
```
#### Getting an evoked object
You can also use the `get_epoch` method to retrieve an evoked object, which represents an averaging of each event in your epoch. Note that evoked data is its own type of object and is not guaranteed to work with every function in this package.
```
evoked = epochs.get_epoch("evoked")
type(evoked)
evoked.info
```
#### Decimate the epoch (optional)
To reduce the size of the selected epoch you can choose to skip a selected number of time steps by calling the `skip_n_steps` method. If `use_single=True` (the default), it will only be run on the current selected epoch from the previous step, contained in the `epoch` attribute. Otherwise it will run on all the epochs contained within the `all_epochs` attribute.
Skipping steps will greatly reduce animation times for the other functions in the package. The greater the number of steps skipped, the fewer the frames to animate. In the example below we are reducing the selected epoch from 4097 time steps to 81 time steps.
```
single_epoch.get_data().shape
num_steps = 50
smaller_epoch = epochs.skip_n_steps(num_steps)
smaller_epoch.get_data().shape
```
#### Average the epoch (optional)
To reduce the size of the selected epoch you can choose to average a selected number of time steps by calling the `average_n_steps` method. It will be run on the current selected epoch from the previous step, contained in the `epoch` attribute.
Averaging works the same way as decimating above, but instead of simply ignoring records between steps it takes an average.
```
num_steps = 50
average_epoch = epochs.average_n_steps(num_steps)
average_epoch.get_data()
```
### MNE functions
Now that you have access to epoched data, you can use the `simpl_eeg` package functions as well as any [MNE functions](https://mne.tools/stable/generated/mne.Epochs.html) which act on `mne.Epochs` objects. Below are some useful examples for the MNE objects contained within the object we created.
#### Raw data
https://mne.tools/stable/generated/mne.io.Raw.html
```
raw = epochs.eeg_file.raw
raw.info
raw.plot_psd();
```
#### Epoch data
```
# first 3 epochs
epochs.all_epochs.plot(n_epochs=3);
# specific epoch
epochs.epoch.plot();
# specific epoch with steps skipped
epochs.skip_n_steps(100).plot();
```
| github_jupyter |
<a href="http://cocl.us/pytorch_link_top">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png" width="750" alt="IBM Product " />
</a>
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png" width="200" alt="cognitiveclass.ai logo" />
<h1>Neural Networks with Momentum</h1>
<h2>Table of Contents</h2>
<p>In this lab, you will see how different values for the momentum parameters affect the convergence rate of a neural network.</p>
<ul>
<li><a href="#Model">Neural Network Module and Function for Training</a></li>
<li><a href="#Train">Train Different Neural Networks Model different values for the Momentum Parameter</a></li>
<li><a href="#Result">Compare Results of Different Momentum Terms</a></li>
</ul>
<p>Estimated Time Needed: <strong>25 min</strong></p>
<hr>
<h2>Preparation</h2>
We'll need the following libraries:
```
# Import the libraries for this lab
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.colors import ListedColormap
from torch.utils.data import Dataset, DataLoader
torch.manual_seed(1)
np.random.seed(1)
```
Functions used to plot:
```
# Define a function for plotting the decision regions
def plot_decision_regions_3class(model, data_set):
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00','#00AAFF'])
X=data_set.x.numpy()
y=data_set.y.numpy()
h = .02
x_min, x_max = X[:, 0].min() - 0.1 , X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1 , X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))
XX=torch.torch.Tensor(np.c_[xx.ravel(), yy.ravel()])
_,yhat=torch.max(model(XX),1)
yhat=yhat.numpy().reshape(xx.shape)
plt.pcolormesh(xx, yy, yhat, cmap=cmap_light)
plt.plot(X[y[:]==0,0], X[y[:]==0,1], 'ro', label='y=0')
plt.plot(X[y[:]==1,0], X[y[:]==1,1], 'go', label='y=1')
plt.plot(X[y[:]==2,0], X[y[:]==2,1], 'o', label='y=2')
plt.title("decision region")
plt.legend()
```
Create the dataset class
```
# Create the dataset class
class Data(Dataset):
# modified from: http://cs231n.github.io/neural-networks-case-study/
# Constructor
def __init__(self, K=3, N=500):
D = 2
X = np.zeros((N * K, D)) # data matrix (each row = single example)
y = np.zeros(N * K, dtype='uint8') # class labels
for j in range(K):
ix = range(N * j, N * (j + 1))
r = np.linspace(0.0, 1, N) # radius
t = np.linspace(j * 4, (j + 1) * 4, N) + np.random.randn(N) * 0.2 # theta
X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
y[ix] = j
self.y = torch.from_numpy(y).type(torch.LongTensor)
self.x = torch.from_numpy(X).type(torch.FloatTensor)
self.len = y.shape[0]
# Getter
def __getitem__(self, index):
return self.x[index], self.y[index]
# Get Length
def __len__(self):
return self.len
# Plot the diagram
def plot_data(self):
plt.plot(self.x[self.y[:] == 0, 0].numpy(), self.x[self.y[:] == 0, 1].numpy(), 'o', label="y=0")
plt.plot(self.x[self.y[:] == 1, 0].numpy(), self.x[self.y[:] == 1, 1].numpy(), 'ro', label="y=1")
plt.plot(self.x[self.y[:] == 2, 0].numpy(),self.x[self.y[:] == 2, 1].numpy(), 'go',label="y=2")
plt.legend()
```
<!--Empty Space for separating topics-->
<h2 id="Model">Neural Network Module and Function for Training</h2>
Create Neural Network Module using <code>ModuleList()</code>
```
# Create the neural network model class
class Net(nn.Module):
# Constructor
def __init__(self, Layers):
super(Net, self).__init__()
self.hidden = nn.ModuleList()
for input_size, output_size in zip(Layers, Layers[1:]):
self.hidden.append(nn.Linear(input_size, output_size))
# Prediction
def forward(self, activation):
L = len(self.hidden)
for (l, linear_transform) in zip(range(L), self.hidden):
if l < L - 1:
activation = F.relu(linear_transform(activation))
else:
activation = linear_transform(activation)
return activation
```
Create the function for training the model.
```
# Define the function for training the model
def train(data_set, model, criterion, train_loader, optimizer, epochs=100):
LOSS = []
ACC = []
for epoch in range(epochs):
for x, y in train_loader:
optimizer.zero_grad()
yhat = model(x)
loss = criterion(yhat, y)
loss.backward()
optimizer.step()
LOSS.append(loss.item())
ACC.append(accuracy(model,data_set))
results ={"Loss":LOSS, "Accuracy":ACC}
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.plot(LOSS,color=color)
ax1.set_xlabel('epoch', color=color)
ax1.set_ylabel('total loss', color=color)
ax1.tick_params(axis = 'y', color=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color) # we already handled the x-label with ax1
ax2.plot(ACC, color=color)
ax2.tick_params(axis='y', color=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
return results
```
Define a function used to calculate accuracy.
```
# Define a function for calculating accuracy
def accuracy(model, data_set):
_, yhat = torch.max(model(data_set.x), 1)
return (yhat == data_set.y).numpy().mean()
```
<!--Empty Space for separating topics-->
<h2 id="Train">Train Different Networks Model different values for the Momentum Parameter</h2>
Crate a dataset object using <code>Data</code>
```
# Create the dataset and plot it
data_set = Data()
data_set.plot_data()
data_set.y = data_set.y.view(-1)
```
Dictionary to contain different cost and accuracy values for each epoch for different values of the momentum parameter.
```
# Initialize a dictionary to contain the cost and accuracy
Results = {"momentum 0": {"Loss": 0, "Accuracy:": 0}, "momentum 0.1": {"Loss": 0, "Accuracy:": 0}}
```
Create a network to classify three classes with 1 hidden layer with 50 neurons and a momentum value of zero.
```
# Train a model with 1 hidden layer and 50 neurons
Layers = [2, 50, 3]
model = Net(Layers)
learning_rate = 0.10
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=20)
criterion = nn.CrossEntropyLoss()
Results["momentum 0"] = train(data_set, model, criterion, train_loader, optimizer, epochs=100)
plot_decision_regions_3class(model, data_set)
```
Create a network to classify three classes with 1 hidden layer with 50 neurons and a momentum value of 0.1.
```
# Train a model with 1 hidden layer and 50 neurons with 0.1 momentum
Layers = [2, 50, 3]
model = Net(Layers)
learning_rate = 0.10
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.1)
train_loader = DataLoader(dataset=data_set, batch_size=20)
criterion = nn.CrossEntropyLoss()
Results["momentum 0.1"] = train(data_set, model, criterion, train_loader, optimizer, epochs=100)
plot_decision_regions_3class(model, data_set)
```
Create a network to classify three classes with 1 hidden layer with 50 neurons and a momentum value of 0.2.
```
# Train a model with 1 hidden layer and 50 neurons with 0.2 momentum
Layers = [2, 50, 3]
model = Net(Layers)
learning_rate = 0.10
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.2)
train_loader = DataLoader(dataset=data_set, batch_size=20)
criterion = nn.CrossEntropyLoss()
Results["momentum 0.2"] = train(data_set, model, criterion, train_loader, optimizer, epochs=100)
plot_decision_regions_3class(model, data_set)
```
Create a network to classify three classes with 1 hidden layer with 50 neurons and a momentum value of 0.4.
```
# Train a model with 1 hidden layer and 50 neurons with 0.4 momentum
Layers = [2, 50, 3]
model = Net(Layers)
learning_rate = 0.10
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.4)
train_loader = DataLoader(dataset=data_set, batch_size=20)
criterion = nn.CrossEntropyLoss()
Results["momentum 0.4"] = train(data_set, model, criterion, train_loader, optimizer, epochs=100)
plot_decision_regions_3class(model, data_set)
```
Create a network to classify three classes with 1 hidden layer with 50 neurons and a momentum value of 0.5.
```
# Train a model with 1 hidden layer and 50 neurons with 0.5 momentum
Layers = [2, 50, 3]
model = Net(Layers)
learning_rate = 0.10
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
train_loader = DataLoader(dataset=data_set, batch_size=20)
criterion = nn.CrossEntropyLoss()
Results["momentum 0.5"] = train(data_set, model, criterion, train_loader, optimizer, epochs=100)
plot_decision_regions_3class(model,data_set)
```
<!--Empty Space for separating topics-->
<h2 id="Result">Compare Results of Different Momentum Terms</h2>
The plot below compares the results for the different momentum terms. We see that, in general, the cost decreases faster for larger momentum terms, but larger momentum terms also lead to larger oscillations. Although the cost initially decreases faster with more momentum, a momentum term of 0.2 appears to reach the smallest value for the cost.
```
# Plot the Loss result for each term
for key, value in Results.items():
plt.plot(value['Loss'],label=key)
plt.legend()
plt.xlabel('epoch')
plt.ylabel('Total Loss or Cost')
```
The accuracy seems to be proportional to the momentum term.
```
# Plot the Accuracy result for each term
for key, value in Results.items():
plt.plot(value['Accuracy'],label=key)
plt.legend()
plt.xlabel('epoch')
plt.ylabel('Accuracy')
```
<!--Empty Space for separating topics-->
<a href="http://cocl.us/pytorch_link_bottom">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" />
</a>
<h2>About the Authors:</h2>
<a href="https://www.linkedin.com/in/joseph-s-50398b136/">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/">Michelle Carey</a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a>
<hr>
Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
| github_jupyter |
# Multiscale Basics Tutorial
*By R. Bulanadi, 28/01/20*
***
While Project Multiscale is currently very powerful, it has a slight learning curve to understand the required functions for basic use. This notebook has been written to teach the basics of using Project Multiscale functions, by binarising the Phase channels of microscopy data obtained from a Cypher Asylum AFM.
To use Project Multiscale, the Multiscale package must be loaded. Load it as below, being sure to change the directory to lead to your Multiscale package.
```
import sys
sys.path.insert(0, '../../') #Change to your Multiscale Directory
from multiscale.processing import twodim
from multiscale.processing import core as pt
from multiscale.processing import plot as msplt
import multiscale.io
```
We will now convert our raw data (`.ibw` format) into the `.hdf5` format used by Project Multiscale. First, we will set the name of both our raw `.ibw` file, and the new `.hdf5` file.
```
original_filename = 'SD_P4_zB5_050mV_-2550mV_0002.ibw'
filename = original_filename.split('.')[0]+'.hdf5'
```
The `multiscale.io` package handles file conversion. In general, one can call `multiscale.io.read_file.tohdf5` to convert the data type.
*If the data type is not currently compatible, either code a conversion function or ask Loic/Ralph/Iaroslav.*
```
multiscale.io.read_file.tohdf5(original_filename)
```
If you open the newly produced file `SD_P4_zB5_050mV_-2550mV_0002` in HDFView, you will see four folders:
1. **`datasets`** contains the main converted data from the .ibw files. It contains a subfolder for each of the original scans (in this case, only one), and each of these subfolders contains the 8 data channels obtained from the raw data.
2. **`metadata`** contains all other data obtained from the .ibw files, except for the image itself, such as the scan rate or tip voltage.
3. **`process`** is currently empty, but will eventually contain the results of our subsequent processing.
4. **`type`** indicates the original filetype of the data - that is, 'ibw'.
**Warning: HDFView prevents Python from operating on open .hdf5 files. Make sure to close the open files before proceeding!**
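If you prefer to inspect the file programmatically instead of (or in addition to) HDFView, a minimal sketch using `h5py` (assumed to be installed, since `.hdf5` files are being written) lists every group and dataset without keeping the file open:
```
import h5py

# The context manager closes the file afterwards, so it will not block later processing
with h5py.File(filename, 'r') as f:
    f.visit(print)  # prints the path of every group and dataset in the file
```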
***
Before we do any processing, let's just check if things work. The function `msplt.save_image` lets us save an image from an array - however, our array is stored in the `.hdf5` file, and Python does not currently know about it. To use `msplt.save_image` then, we call it using the `pt.m_apply` function.
In short, `pt.m_apply` lets us pass the location of the files within the `.hdf5` file, instead of an actual array. This makes handling several datasets much easier. For now, the main function call of `pt.m_apply` is of the format:
`m_apply(filename, function, in_paths)`
1. **`filename`** The name of the `.hdf5` file we are using. We set this earlier to be `'SD_P4_zB5_050mV_-2550mV_0002.hdf5'`
2. **`function`** The function we are applying. In this case, we are going to use the function `msplt.save_image`.
3. **`in_paths`** This is the path (or paths) to the data within the `.hdf5` file. If you look in HDFView, you can see the file directory. In this case, let's look at the `Phase1Trace` channel in `datasets`. We will thus set this argument to `'datasets/SD_P4_zB5_050mV_-2550mV_0002/Phase1Trace'`
**Note:** Other arguments exist, but are beyond this scope. See Intermediate or Programming tutorials for more detail
```
pt.m_apply(filename, msplt.save_image, 'datasets/SD_P4_zB5_050mV_-2550mV_0002/Phase2Retrace', image_name = 'Original_Phase', show=True)
```
You might notice we added extra arguments to `m_apply`. In general, if `m_apply` is given extra arguments, these arguments are passed to the subfunction: in this case, `msplt.save_image`. Thus, `msplt.save_image` knows to set `image_name` to `'Original_Phase'`, and to set `show` to `True`. You should now also see the image saved in this file directory; if you want, you could change this by changing the variable `saving_path`.
***
Now that we have something to compare to, we can begin processing. We are going to linearise the phase of this image (that is, transform the phase, which is currently an angle between -90 and 270 that wraps at those limits, into a number between 0 and 1). To do this, we are going to use the function `phase_linearisation`, which we will again call using `m_apply`:
```
pt.m_apply(filename, twodim.phase_linearisation, 'datasets/SD_P4_zB5_050mV_-2550mV_0002/Phase2Retrace')
print('Linearisation Complete!')
```
If you open HDFView right now, you should see a new folder in `process` called `001-phase_linearisation` which contains the newly linearised data. If an error did occur at some point, you might also see other files of the form `abc-phase_linearisation`, where abc is some number. Don't worry; simply mark the correct (or incorrect) ones, and change the path names of the next function calls to ensure it goes to the correct folder.
***
Now that the data is linearised, we can binarise it. This is simply a threshold function. It is called very similarly to the last function, except for the different function call and the different path location. Feel free to look at the code itself in the `twodim` subpackage if you want to see how this code works, or if you want to pass it other arguments.
```
pt.m_apply(filename, twodim.phase_binarisation, 'process/001-phase_linearisation/SD_P4_zB5_050mV_-2550mV_0002/Phase2Retrace')
print('Binarisation Complete!')
```
Finally, we can view our final image. This requires the `msplt.save_image` function, which we used earlier.
```
pt.m_apply(filename, msplt.save_image, 'process/002-phase_binarisation/SD_P4_zB5_050mV_-2550mV_0002/Phase2Retrace', image_name = 'Binarised_Phase', show=True)
```
If we want to, we can also go back and see the intermediate, linearised phase:
```
pt.m_apply(filename, msplt.save_image, 'process/001-phase_linearisation/SD_P4_zB5_050mV_-2550mV_0002/Phase2Retrace', image_name = 'Linearised_Phase', show=True)
```
This ends the basic multiscale tutorial. As shown so far, Multiscale allows you to keep track of all of your variables and intermediate steps. Since they are saved permanently to the `.hdf5` file, they will remain so long as you don't delete it. Any function that works with arrays can also be passed directly into m_apply, and you also gain access to the current repository of functions.
If you want to operate on multiple data files concurrently, or use more complicated functions that require this (such as distortion correction), please see the Intermediate tutorial. If you want to use Multiscale in more depth afterwards, please check the Programming tutorial.
***
## Troubleshooting
**OSError: Unable to create file**
Close the file in HDFView!
**KeyError: 'Unable to open object (component not found)'**
Make sure your `in_path` is correct. Open the file, and make sure that all your process numbers (ie, the 002) is the same as in your function call.
| github_jupyter |
## Our Mission ##
Spam detection is one of the major applications of Machine Learning in the interwebs today. Pretty much all of the major email service providers have spam detection systems built in and automatically classify such mail as 'Junk Mail'.
In this mission we will be using the Naive Bayes algorithm to create a model that can classify SMS messages as spam or not spam, based on the training we give to the model. It is important to have some level of intuition as to what a spammy text message might look like. Often they have words like 'free', 'win', 'winner', 'cash', 'prize' and the like in them as these texts are designed to catch your eye and in some sense tempt you to open them. Also, spam messages tend to have words written in all capitals and also tend to use a lot of exclamation marks. To the human recipient, it is usually pretty straightforward to identify a spam text and our objective here is to train a model to do that for us!
Being able to identify spam messages is a binary classification problem as messages are classified as either 'Spam' or 'Not Spam' and nothing else. Also, this is a supervised learning problem, as we will be feeding a labelled dataset into the model, that it can learn from, to make future predictions.
# Overview
This project has been broken down in to the following steps:
- Step 0: Introduction to the Naive Bayes Theorem
- Step 1.1: Understanding our dataset
- Step 1.2: Data Preprocessing
- Step 2.1: Bag of Words (BoW)
- Step 2.2: Implementing BoW from scratch
- Step 2.3: Implementing Bag of Words in scikit-learn
- Step 3.1: Training and testing sets
- Step 3.2: Applying Bag of Words processing to our dataset.
- Step 4.1: Bayes Theorem implementation from scratch
- Step 4.2: Naive Bayes implementation from scratch
- Step 5: Naive Bayes implementation using scikit-learn
- Step 6: Evaluating our model
- Step 7: Conclusion
**Note**: If you need help with a step, you can find the solution notebook by clicking on the Jupyter logo in the top left of the notebook.
### Step 0: Introduction to the Naive Bayes Theorem ###
Bayes Theorem is one of the earliest probabilistic inference algorithms. It was developed by Reverend Bayes (which he used to try and infer the existence of God no less), and still performs extremely well for certain use cases.
It's best to understand this theorem using an example. Let's say you are a member of the Secret Service and you have been deployed to protect the Democratic presidential nominee during one of his/her campaign speeches. Being a public event that is open to all, your job is not easy and you have to be on the constant lookout for threats. So one place to start is to put a certain threat-factor for each person. So based on the features of an individual, like age, whether the person is carrying a bag, looks nervous, etc., you can make a judgment call as to whether that person is a viable threat.
If an individual ticks all the boxes up to a level where it crosses a threshold of doubt in your mind, you can take action and remove that person from the vicinity. Bayes Theorem works in the same way, as we are computing the probability of an event (a person being a threat) based on the probabilities of certain related events (age, presence of bag or not, nervousness of the person, etc.).
One thing to consider is the independence of these features amongst each other. For example if a child looks nervous at the event then the likelihood of that person being a threat is not as much as say if it was a grown man who was nervous. To break this down a bit further, here there are two features we are considering, age AND nervousness. Say we look at these features individually, we could design a model that flags ALL persons that are nervous as potential threats. However, it is likely that we will have a lot of false positives as there is a strong chance that minors present at the event will be nervous. Hence by considering the age of a person along with the 'nervousness' feature we would definitely get a more accurate result as to who are potential threats and who aren't.
This is the 'Naive' bit of the theorem where it considers each feature to be independent of each other which may not always be the case and hence that can affect the final judgement.
In short, Bayes Theorem calculates the probability of a certain event happening (in our case, a message being spam) based on the joint probabilistic distributions of certain other events (in our case, the appearance of certain words in a message). We will dive into the workings of Bayes Theorem later in the mission, but first, let us understand the data we are going to work with.
### Step 1.1: Understanding our dataset ###
We will be using a dataset originally compiled and posted on the UCI Machine Learning repository which has a very good collection of datasets for experimental research purposes. If you're interested, you can review the [abstract](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) and the original [compressed data file](https://archive.ics.uci.edu/ml/machine-learning-databases/00228/) on the UCI site. For this exercise, however, we've gone ahead and downloaded the data for you.
**Here's a preview of the data:**
<img src="images/dqnb.png" height="1242" width="1242">
The columns in the data set are currently not named and as you can see, there are 2 columns.
The first column takes two values, 'ham' which signifies that the message is not spam, and 'spam' which signifies that the message is spam.
The second column is the text content of the SMS message that is being classified.
>**Instructions:**
* Import the dataset into a pandas dataframe using the **read_table** method. The file has already been downloaded, and you can access it using the filepath 'smsspamcollection/SMSSpamCollection'. Because this is a tab separated dataset we will be using '\\t' as the value for the 'sep' argument which specifies this format.
* Also, rename the column names by specifying a list ['label', 'sms_message'] to the 'names' argument of read_table().
* Print the first five values of the dataframe with the new column names.
```
# '!' allows you to run bash commands from jupyter notebook.
print("List all the files in the current directory\n")
!ls
# The required data table can be found under smsspamcollection/SMSSpamCollection
print("\n List all the files inside the smsspamcollection directory\n")
!ls smsspamcollection
!cat smsspamcollection/SMSSpamCollection
import pandas as pd
# Dataset available using filepath 'smsspamcollection/SMSSpamCollection'
df = pd.read_table("smsspamcollection/SMSSpamCollection", sep='\t', names=['label', 'sms_message'])
# Output printing out first 5 rows
df[:5]
```
### Step 1.2: Data Preprocessing ###
Now that we have a basic understanding of what our dataset looks like, let's convert our labels to binary variables, 0 to represent 'ham'(i.e. not spam) and 1 to represent 'spam' for ease of computation.
You might be wondering why we need to do this step. The answer lies in how scikit-learn handles inputs. Scikit-learn only deals with numerical values and hence if we were to leave our label values as strings, scikit-learn would do the conversion internally (more specifically, the string labels will be cast to unknown float values).
Our model would still be able to make predictions if we left our labels as strings but we could have issues later when calculating performance metrics, for example when calculating our precision and recall scores. Hence, to avoid unexpected 'gotchas' later, it is good practice to have our categorical values be fed into our model as integers.
>**Instructions:**
* Convert the values in the 'label' column to numerical values using map method as follows:
{'ham':0, 'spam':1} This maps the 'ham' value to 0 and the 'spam' value to 1.
* Also, to get an idea of the size of the dataset we are dealing with, print out number of rows and columns using
'shape'.
```
'''
Solution
'''
df['label'] = df.label.map({'ham': 0, 'spam': 1})
print(df.shape)
```
### Step 2.1: Bag of Words ###
What we have here in our data set is a large collection of text data (5,572 rows of data). Most ML algorithms rely on numerical data to be fed into them as input, and email/sms messages are usually text heavy.
Here we'd like to introduce the Bag of Words (BoW) concept which is a term used to specify the problems that have a 'bag of words' or a collection of text data that needs to be worked with. The basic idea of BoW is to take a piece of text and count the frequency of the words in that text. It is important to note that the BoW concept treats each word individually and the order in which the words occur does not matter.
Using a process which we will go through now, we can convert a collection of documents to a matrix, with each document being a row and each word (token) being the column, and the corresponding (row, column) values being the frequency of occurrence of each word or token in that document.
For example:
Let's say we have 4 documents, which are text messages
in our case, as follows:
`['Hello, how are you!',
'Win money, win from home.',
'Call me now',
'Hello, Call you tomorrow?']`
Our objective here is to convert this set of texts to a frequency distribution matrix, as follows:
<img src="images/countvectorizer.png" height="542" width="542">
Here as we can see, the documents are numbered in the rows, and each word is a column name, with the corresponding value being the frequency of that word in the document.
Let's break this down and see how we can do this conversion using a small set of documents.
To handle this, we will be using sklearn's
[count vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) method which does the following:
* It tokenizes the string (separates the string into individual words) and gives an integer ID to each token.
* It counts the occurrence of each of those tokens.
**Please Note:**
* The CountVectorizer method automatically converts all tokenized words to their lower case form so that it does not treat words like 'He' and 'he' differently. It does this using the `lowercase` parameter which is by default set to `True`.
* It also ignores all punctuation so that words followed by a punctuation mark (for example: 'hello!') are not treated differently than the same words not prefixed or suffixed by a punctuation mark (for example: 'hello'). It does this using the `token_pattern` parameter which has a default regular expression which selects tokens of 2 or more alphanumeric characters.
* The third parameter to take note of is the `stop_words` parameter. Stop words refer to the most commonly used words in a language. They include words like 'am', 'an', 'and', 'the', etc. By setting this parameter value to `english`, CountVectorizer will automatically ignore all words (from our input text) that are found in the built in list of English stop words in scikit-learn. This is extremely helpful as stop words can skew our calculations when we are trying to find certain key words that are indicative of spam.
We will dive into the application of each of these into our model in a later step, but for now it is important to be aware of such preprocessing techniques available to us when dealing with textual data.
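As a quick illustration of these parameters (a minimal sketch, not one of the mission steps, with `example_vectorizer` as a throwaway name), you could instantiate a vectorizer with all three set explicitly:
```
from sklearn.feature_extraction.text import CountVectorizer

# lowercase and token_pattern are shown at their default values for clarity;
# stop_words='english' additionally removes common English words before counting.
example_vectorizer = CountVectorizer(lowercase=True,
                                     token_pattern=r'(?u)\b\w\w+\b',
                                     stop_words='english')
print(example_vectorizer)
```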
### Step 2.2: Implementing Bag of Words from scratch ###
Before we dive into scikit-learn's Bag of Words (BoW) library to do the dirty work for us, let's implement it ourselves first so that we can understand what's happening behind the scenes.
**Step 1: Convert all strings to their lower case form.**
Let's say we have a document set:
```
documents = ['Hello, how are you!',
'Win money, win from home.',
'Call me now.',
'Hello, Call hello you tomorrow?']
```
>>**Instructions:**
* Convert all the strings in the documents set to their lower case. Save them into a list called 'lower_case_documents'. You can convert strings to their lower case in python by using the lower() method.
```
'''
Solution:
'''
documents = ['Hello, how are you!',
'Win money, win from home.',
'Call me now.',
'Hello, Call hello you tomorrow?']
lower_case_documents = [w.lower() for w in documents]
print(lower_case_documents)
```
**Step 2: Removing all punctuation**
>>**Instructions:**
Remove all punctuation from the strings in the document set. Save the strings into a list called
'sans_punctuation_documents'.
```
'''
Solution:
'''
punctuation = ",.?!"
import string
sans_punctuation_documents = [w.translate({ord(c): None for c in ".,_!?"})for w in lower_case_documents]
print(sans_punctuation_documents)
```
**Step 3: Tokenization**
Tokenizing a sentence in a document set means splitting up the sentence into individual words using a delimiter. The delimiter specifies what character we will use to identify the beginning and end of a word. Most commonly, we use a single space as the delimiter character for identifying words, and this is true in our documents in this case also.
>>**Instructions:**
Tokenize the strings stored in 'sans_punctuation_documents' using the split() method. Store the final document set
in a list called 'preprocessed_documents'.
```
'''
Solution:
'''
# Keep one list of tokens per document
preprocessed_documents = [w.split() for w in sans_punctuation_documents]
print(preprocessed_documents)
```
**Step 4: Count frequencies**
Now that we have our document set in the required format, we can proceed to counting the occurrence of each word in each document of the document set. We will use the `Counter` method from the Python `collections` library for this purpose.
`Counter` counts the occurrence of each item in the list and returns a dictionary with the key as the item being counted and the corresponding value being the count of that item in the list.
>>**Instructions:**
Using the Counter() method and preprocessed_documents as the input, create a dictionary with the keys being each word in each document and the corresponding values being the frequency of occurrence of that word. Save each Counter dictionary as an item in a list called 'frequency_list'.
```
'''
Solution
'''
import pprint
from collections import Counter

# One Counter dictionary per document, stored as a list
frequency_list = [Counter(doc) for doc in preprocessed_documents]
pprint.pprint(frequency_list)
```
Congratulations! You have implemented the Bag of Words process from scratch! As we can see in our previous output, we have a frequency distribution dictionary which gives a clear view of the text that we are dealing with.
We should now have a solid understanding of what is happening behind the scenes in the `sklearn.feature_extraction.text.CountVectorizer` method of scikit-learn.
We will now implement `sklearn.feature_extraction.text.CountVectorizer` method in the next step.
### Step 2.3: Implementing Bag of Words in scikit-learn ###
Now that we have implemented the BoW concept from scratch, let's go ahead and use scikit-learn to do this process in a clean and succinct way. We will use the same document set as we used in the previous step.
```
'''
Here we will look to create a frequency matrix on a smaller document set to make sure we understand how the
document-term matrix generation happens. We have created a sample document set 'documents'.
'''
documents = ['Hello, how are you!',
'Win money, win from home.',
'Call me now.',
'Hello, Call hello you tomorrow?']
```
>>**Instructions:**
Import the sklearn.feature_extraction.text.CountVectorizer method and create an instance of it called 'count_vector'.
```
'''
Solution
'''
from sklearn.feature_extraction.text import CountVectorizer
count_vector = CountVectorizer()
count_vector
```
**Data preprocessing with CountVectorizer()**
In Step 2.2, we implemented a version of the CountVectorizer() method from scratch that entailed cleaning our data first. This cleaning involved converting all of our data to lower case and removing all punctuation marks. CountVectorizer() has certain parameters which take care of these steps for us. They are:
* `lowercase = True`
The `lowercase` parameter has a default value of `True` which converts all of our text to its lower case form.
* `token_pattern = (?u)\\b\\w\\w+\\b`
The `token_pattern` parameter has a default regular expression value of `(?u)\\b\\w\\w+\\b` which ignores all punctuation marks and treats them as delimiters, while accepting alphanumeric strings of length greater than or equal to 2, as individual tokens or words.
* `stop_words`
The `stop_words` parameter, if set to `english` will remove all words from our document set that match a list of English stop words defined in scikit-learn. Considering the small size of our dataset and the fact that we are dealing with SMS messages and not larger text sources like e-mail, we will not use stop words, and we won't be setting this parameter value.
You can take a look at all the parameter values of your `count_vector` object by simply printing out the object as follows:
```
'''
Practice node:
Print the 'count_vector' object which is an instance of 'CountVectorizer()'
'''
# No need to revise this code
print(count_vector)
```
>>**Instructions:**
Fit your document dataset to the CountVectorizer object you have created using fit(), and get the list of words
which have been categorized as features using the get_feature_names() method.
```
'''
Solution:
'''
# No need to revise this code
count_vector.fit(documents)
count_vector.get_feature_names()
```
The `get_feature_names()` method returns our feature names for this dataset, which is the set of words that make up our vocabulary for 'documents'.
>>**Instructions:**
Create a matrix with each row representing one of the 4 documents, and each column representing a word (feature name).
Each value in the matrix will represent the frequency of the word in that column occurring in the particular document in that row.
You can do this using the transform() method of CountVectorizer, passing in the document data set as the argument. The transform() method returns a matrix of NumPy integers, which you can convert to an array using
toarray(). Call the array 'doc_array'.
```
'''
Solution
'''
doc_array = count_vector.transform(documents).toarray()
doc_array
```
Now we have a clean representation of the documents in terms of the frequency distribution of the words in them. To make it easier to understand our next step is to convert this array into a dataframe and name the columns appropriately.
>>**Instructions:**
Convert the 'doc_array' we created into a dataframe, with the column names as the words (feature names). Call the dataframe 'frequency_matrix'.
```
'''
Solution
'''
import pandas as pd
frequency_matrix = pd.DataFrame(doc_array, columns=count_vector.get_feature_names())
frequency_matrix
```
Congratulations! You have successfully implemented a Bag of Words problem for a document dataset that we created.
One potential issue that can arise from using this method is that if our dataset of text is extremely large (say if we have a large collection of news articles or email data), there will be certain values that are more common than others simply due to the structure of the language itself. For example, words like 'is', 'the', 'an', pronouns, grammatical constructs, etc., could skew our matrix and affect our analyis.
There are a couple of ways to mitigate this. One way is to use the `stop_words` parameter and set its value to `english`. This will automatically ignore all the words in our input text that are found in a built-in list of English stop words in scikit-learn.
Another way of mitigating this is by using the [tfidf](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer) method. This method is out of scope for the context of this lesson.
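Although TF-IDF is out of scope here, a minimal sketch of how `TfidfVectorizer` could be swapped in for `CountVectorizer` (purely illustrative, using the small `documents` set from above) looks like this:
```
from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF down-weights words that appear in many documents, which mitigates
# the skew caused by very frequent words.
tfidf_vectorizer = TfidfVectorizer(stop_words='english')
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
print(tfidf_matrix.toarray())
```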
### Step 3.1: Training and testing sets ###
Now that we understand how to use the Bag of Words approach, we can return to our original, larger UCI dataset and proceed with our analysis. Our first step is to split our dataset into a training set and a testing set so we can first train, and then test our model.
>>**Instructions:**
Split the dataset into a training and testing set using the train_test_split method in sklearn, and print out the number of rows we have in each of our training and testing data. Split the data
using the following variables:
* `X_train` is our training data for the 'sms_message' column.
* `y_train` is our training data for the 'label' column
* `X_test` is our testing data for the 'sms_message' column.
* `y_test` is our testing data for the 'label' column.
```
'''
Solution
'''
# split into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df['sms_message'],
df['label'],
random_state=1)
print('Number of rows in the total set: {}'.format(df.shape[0]))
print('Number of rows in the training set: {}'.format(X_train.shape[0]))
print('Number of rows in the test set: {}'.format(X_test.shape[0]))
```
### Step 3.2: Applying Bag of Words processing to our dataset. ###
Now that we have split the data, our next objective is to follow the steps from "Step 2: Bag of Words," and convert our data into the desired matrix format. To do this we will be using CountVectorizer() as we did before. There are two steps to consider here:
* First, we have to fit our training data (`X_train`) into `CountVectorizer()` and return the matrix.
* Secondly, we have to transform our testing data (`X_test`) to return the matrix.
Note that `X_train` is our training data for the 'sms_message' column in our dataset and we will be using this to train our model.
`X_test` is our testing data for the 'sms_message' column and this is the data we will be using (after transformation to a matrix) to make predictions on. We will then compare those predictions with `y_test` in a later step.
For now, we have provided the code that does the matrix transformations for you!
```
'''
[Practice Node]
The code for this segment is in 2 parts. First, we are learning a vocabulary dictionary for the training data
and then transforming the data into a document-term matrix; secondly, for the testing data we are only
transforming the data into a document-term matrix.
This is similar to the process we followed in Step 2.3.
We will provide the transformed data to students in the variables 'training_data' and 'testing_data'.
'''
'''
Solution
'''
# Instantiate the CountVectorizer method
count_vector = CountVectorizer()
# Fit the training data and then return the matrix
training_data = count_vector.fit_transform(X_train)
# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
testing_data = count_vector.transform(X_test)
```
### Step 4.1: Bayes Theorem implementation from scratch ###
Now that we have our dataset in the format that we need, we can move onto the next portion of our mission which is the algorithm we will use to make our predictions to classify a message as spam or not spam. Remember that at the start of the mission we briefly discussed the Bayes theorem but now we shall go into a little more detail. In layman's terms, the Bayes theorem calculates the probability of an event occurring, based on certain other probabilities that are related to the event in question. It is composed of "prior probabilities" - or just "priors." These "priors" are the probabilities that we are aware of, or that are given to us. And Bayes theorem is also composed of the "posterior probabilities," or just "posteriors," which are the probabilities we are looking to compute using the "priors".
Let us implement the Bayes Theorem from scratch using a simple example. Let's say we are trying to find the odds of an individual having diabetes, given that he or she was tested for it and got a positive result.
In the medical field, such probabilities play a very important role as they often deal with life and death situations.
We assume the following:
`P(D)` is the probability of a person having Diabetes. Its value is `0.01`, or in other words, 1% of the general population has diabetes (disclaimer: these values are assumptions and are not reflective of any actual medical study).
`P(Pos)` is the probability of getting a positive test result.
`P(Neg)` is the probability of getting a negative test result.
`P(Pos|D)` is the probability of getting a positive result on a test done for detecting diabetes, given that you have diabetes. This has a value `0.9`. In other words the test is correct 90% of the time. This is also called the Sensitivity or True Positive Rate.
`P(Neg|~D)` is the probability of getting a negative result on a test done for detecting diabetes, given that you do not have diabetes. This also has a value of `0.9` and is therefore correct, 90% of the time. This is also called the Specificity or True Negative Rate.
The Bayes formula is as follows:
<img src="images/bayes_formula.png" height="242" width="242">
* `P(A)` is the prior probability of A occurring independently. In our example this is `P(D)`. This value is given to us.
* `P(B)` is the prior probability of B occurring independently. In our example this is `P(Pos)`.
* `P(A|B)` is the posterior probability that A occurs given B. In our example this is `P(D|Pos)`. That is, **the probability of an individual having diabetes, given that this individual got a positive test result. This is the value that we are looking to calculate.**
* `P(B|A)` is the prior probability of B occurring, given A. In our example this is `P(Pos|D)`. This value is given to us.
Putting our values into the formula for Bayes theorem we get:
`P(D|Pos) = P(D) * P(Pos|D) / P(Pos)`
The probability of getting a positive test result `P(Pos)` can be calculated using the Sensitivity and Specificity as follows:
`P(Pos) = [P(D) * Sensitivity] + [P(~D) * (1 - Specificity)]`
```
'''
Instructions:
Calculate probability of getting a positive test result, P(Pos)
'''
'''
Solution (skeleton code will be provided)
'''
# P(D)
p_diabetes = 0.01
# P(~D)
p_no_diabetes = 0.99
# Sensitivity or P(Pos|D)
p_pos_diabetes = 0.9
# Specificity or P(Neg|~D)
p_neg_no_diabetes = 0.9
# P(Pos)
p_pos = (p_diabetes * p_pos_diabetes) + (p_no_diabetes * (1 - p_neg_no_diabetes))
print('The probability of getting a positive test result P(Pos) is: {}',format(p_pos))
```
**Using all of this information we can calculate our posteriors as follows:**
The probability of an individual having diabetes, given that, that individual got a positive test result:
`P(D|Pos) = (P(D) * Sensitivity) / P(Pos)`
The probability of an individual not having diabetes, given that, that individual got a positive test result:
`P(~D|Pos) = (P(~D) * (1-Specificity)) / P(Pos)`
The sum of our posteriors will always equal `1`.
```
'''
Instructions:
Compute the probability of an individual having diabetes, given that, that individual got a positive test result.
In other words, compute P(D|Pos).
The formula is: P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos)
'''
'''
Solution
'''
# P(D|Pos)
p_diabetes_pos = (p_diabetes * p_pos_diabetes) / p_pos
print('Probability of an individual having diabetes, given that that individual got a positive test result is:\
',format(p_diabetes_pos))
'''
Instructions:
Compute the probability of an individual not having diabetes, given that, that individual got a positive test result.
In other words, compute P(~D|Pos).
The formula is: P(~D|Pos) = P(~D) * P(Pos|~D) / P(Pos)
Note that P(Pos|~D) can be computed as 1 - P(Neg|~D).
Therefore:
P(Pos|~D) = p_pos_no_diabetes = 1 - 0.9 = 0.1
'''
'''
Solution
'''
# P(Pos|~D)
p_pos_no_diabetes = 0.1
# P(~D|Pos)
p_no_diabetes_pos = (p_no_diabetes * p_pos_no_diabetes) / p_pos
print('Probability of an individual not having diabetes, given that that individual got a positive test result is:',
      p_no_diabetes_pos)
```
Congratulations! You have implemented Bayes Theorem from scratch. Your analysis shows that even if you get a positive test result, there is only an 8.3% chance that you actually have diabetes and a 91.67% chance that you do not have diabetes. This is of course assuming that only 1% of the entire population has diabetes which is only an assumption.
**What does the term 'Naive' in 'Naive Bayes' mean ?**
The term 'Naive' in Naive Bayes comes from the fact that the algorithm considers the features that it is using to make the predictions to be independent of each other, which may not always be the case. So in our Diabetes example, we are considering only one feature, that is the test result. Say we added another feature, 'exercise'. Let's say this feature has a binary value of `0` and `1`, where the former signifies that the individual exercises less than or equal to 2 days a week and the latter signifies that the individual exercises greater than or equal to 3 days a week. If we had to use both of these features, namely the test result and the value of the 'exercise' feature, to compute our final probabilities, Bayes' theorem would fail. Naive Bayes' is an extension of Bayes' theorem that assumes that all the features are independent of each other.
### Step 4.2: Naive Bayes implementation from scratch ###
Now that you have understood the ins and outs of Bayes Theorem, we will extend it to consider cases where we have more than one feature.
Let's say that we have two political parties' candidates, 'Jill Stein' of the Green Party and 'Gary Johnson' of the Libertarian Party and we have the probabilities of each of these candidates saying the words 'freedom', 'immigration' and 'environment' when they give a speech:
* Probability that Jill Stein says 'freedom': 0.1 ---------> `P(F|J)`
* Probability that Jill Stein says 'immigration': 0.1 -----> `P(I|J)`
* Probability that Jill Stein says 'environment': 0.8 -----> `P(E|J)`
* Probability that Gary Johnson says 'freedom': 0.7 -------> `P(F|G)`
* Probability that Gary Johnson says 'immigration': 0.2 ---> `P(I|G)`
* Probability that Gary Johnson says 'environment': 0.1 ---> `P(E|G)`
And let us also assume that the probability of Jill Stein giving a speech, `P(J)` is `0.5` and the same for Gary Johnson, `P(G) = 0.5`.
Given this, what if we had to find the probabilities of Jill Stein saying the words 'freedom' and 'immigration'? This is where the Naive Bayes' theorem comes into play as we are considering two features, 'freedom' and 'immigration'.
Now we are at a place where we can define the formula for the Naive Bayes' theorem:
<img src="images/naivebayes.png" height="342" width="342">
Here, `y` is the class variable (in our case the name of the candidate) and `x1` through `xn` are the feature vectors (in our case the individual words). The theorem makes the assumption that each of the feature vectors or words (`xi`) are independent of each other.
To break this down, we have to compute the following posterior probabilities:
* `P(J|F,I)`: Given the words 'freedom' and 'immigration' were said, what's the probability they were said by Jill?
Using the formula and our knowledge of Bayes' theorem, we can compute this as follows: `P(J|F,I)` = `(P(J) * P(F|J) * P(I|J)) / P(F,I)`. Here `P(F,I)` is the probability of the words 'freedom' and 'immigration' being said in a speech.
* `P(G|F,I)`: Given the words 'freedom' and 'immigration' were said, what's the probability they were said by Gary?
Using the formula, we can compute this as follows: `P(G|F,I)` = `(P(G) * P(F|G) * P(I|G)) / P(F,I)`
```
'''
Instructions: Compute the probability of the words 'freedom' and 'immigration' being said in a speech, or
P(F,I).
The first step is multiplying the probabilities of Jill Stein giving a speech with her individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_j_text.
The second step is multiplying the probabilities of Gary Johnson giving a speech with his individual
probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_g_text.
The third step is to add both of these probabilities and you will get P(F,I).
'''
'''
Solution: Step 1
'''
# P(J)
p_j = 0.5
# P(F/J)
p_j_f = 0.1
# P(I/J)
p_j_i = 0.1
p_j_text = p_j * p_j_f * p_j_i
print(p_j_text)
'''
Solution: Step 2
'''
# P(G)
p_g = 0.5
# P(F/G)
p_g_f = 0.7
# P(I/G)
p_g_i = 0.2
p_g_text = p_g * p_g_f * p_g_i
print(p_g_text)
'''
Solution: Step 3: Compute P(F,I) and store in p_f_i
'''
p_f_i = p_j_text + p_g_text
print('Probability of words freedom and immigration being said are: ', format(p_f_i))
```
Now we can compute the probability of `P(J|F,I)`, the probability of Jill Stein saying the words 'freedom' and 'immigration' and `P(G|F,I)`, the probability of Gary Johnson saying the words 'freedom' and 'immigration'.
```
'''
Instructions:
Compute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I) and store it in a variable p_j_fi
'''
'''
Solution
'''
p_j_fi = p_j_text / p_f_i
print('The probability of Jill Stein saying the words Freedom and Immigration: ', format(p_j_fi))
'''
Instructions:
Compute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I) and store it in a variable p_g_fi
'''
'''
Solution
'''
p_g_fi = p_g_text / p_f_i
print('The probability of Gary Johnson saying the words Freedom and Immigration: ', format(p_g_fi))
```
And as we can see, just like in the Bayes' theorem case, the sum of our posteriors is equal to 1.
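As a quick sanity check (assuming `p_j_fi` and `p_g_fi` were computed in the cells above), you can verify this directly:
```
# The two posteriors should sum to 1, up to floating-point error
print(p_j_fi + p_g_fi)
```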
Congratulations! You have implemented the Naive Bayes' theorem from scratch. Our analysis shows that there is only a 6.6% chance that Jill Stein of the Green Party uses the words 'freedom' and 'immigration' in her speech as compared with the 93.3% chance for Gary Johnson of the Libertarian party.
For another example of Naive Bayes, let's consider searching for images using the term 'Sacramento Kings' in a search engine. In order for us to get the results pertaining to the Sacramento Kings NBA basketball team, the search engine needs to be able to associate the two words together and not treat them individually. If the search engine only searched for the words individually, we would get results of images tagged with 'Sacramento,' like pictures of city landscapes, and images of 'Kings,' which might be pictures of crowns or kings from history. But associating the two terms together would produce images of the basketball team. In the first approach we would treat the words as independent entities, so it would be considered 'naive.' We don't usually want this approach from a search engine, but it can be extremely useful in other cases.
Applying this to our problem of classifying messages as spam, the Naive Bayes algorithm *looks at each word individually and not as associated entities* with any kind of link between them. In the case of spam detectors, this usually works, as there are certain red flag words in an email which are highly reliable in classifying it as spam. For example, emails with words like 'viagra' are usually classified as spam.
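To see what that word-by-word treatment looks like in code, here is a toy sketch — the priors and per-word probabilities below are made-up illustration values, not estimates from our dataset:
```
# Toy illustration of the 'naive' independence assumption: the likelihood of a
# message given a class is the product of per-word likelihoods, with no notion
# of word order or of words being associated with each other.
p_word_given_spam = {'viagra': 0.30, 'meeting': 0.01}
p_word_given_ham = {'viagra': 0.001, 'meeting': 0.10}
p_spam, p_ham = 0.13, 0.87  # made-up priors

message = ['viagra', 'meeting']
spam_score, ham_score = p_spam, p_ham
for word in message:
    spam_score *= p_word_given_spam[word]
    ham_score *= p_word_given_ham[word]

# normalize the two scores so the posteriors sum to 1
total = spam_score + ham_score
print('P(spam | message):', spam_score / total)
print('P(ham | message):', ham_score / total)
```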
### Step 5: Naive Bayes implementation using scikit-learn ###
Now let's return to our spam classification context. Thankfully, sklearn has several Naive Bayes implementations that we can use, so we do not have to do the math from scratch. We will be using the `sklearn.naive_bayes` module to make predictions on our SMS messages dataset.
Specifically, we will be using the multinomial Naive Bayes algorithm. This particular classifier is suitable for classification with discrete features (such as in our case, word counts for text classification). It takes in integer word counts as its input. On the other hand, Gaussian Naive Bayes is better suited for continuous data as it assumes that the input data has a Gaussian (normal) distribution.
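For a quick feel of that contrast, here is a small sketch of Gaussian Naive Bayes on continuous features — the synthetic data is purely illustrative and not part of this project:
```
# GaussianNB models each continuous feature as a per-class normal distribution,
# whereas MultinomialNB (used below) expects discrete counts such as word frequencies.
import numpy as np
from sklearn.naive_bayes import GaussianNB

rng = np.random.RandomState(0)
X_cont = np.vstack([rng.normal(0, 1, size=(50, 2)),   # class 0 clustered around 0
                    rng.normal(3, 1, size=(50, 2))])  # class 1 clustered around 3
y_cont = np.array([0] * 50 + [1] * 50)

gnb = GaussianNB().fit(X_cont, y_cont)
print(gnb.predict([[0.2, -0.1], [2.8, 3.1]]))  # expected: [0 1]
```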
```
'''
Instructions:
We have loaded the training data into the variable 'training_data' and the testing data into the
variable 'testing_data'.
Import the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier
'naive_bayes'. You will be training the classifier using 'training_data' and 'y_train' from our split earlier.
'''
'''
Solution
'''
from sklearn.naive_bayes import MultinomialNB
naive_bayes = MultinomialNB()
naive_bayes.fit(training_data, y_train)
'''
Instructions:
Now that our algorithm has been trained using the training data set we can now make some predictions on the test data
stored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable.
'''
'''
Solution
'''
predictions = naive_bayes.predict(testing_data)
```
Now that predictions have been made on our test set, we need to check the accuracy of our predictions.
### Step 6: Evaluating our model ###
Now that we have made predictions on our test set, our next goal is to evaluate how well our model is doing. There are various mechanisms for doing so, so first let's review them.
**Accuracy** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).
**Precision** tells us what proportion of the messages we classified as spam actually were spam.
It is the ratio of true positives (messages classified as spam that actually are spam) to all positives (all messages classified as spam, regardless of whether that classification was correct). In other words, precision is the ratio of
`[True Positives/(True Positives + False Positives)]`
**Recall (sensitivity)** tells us what proportion of the messages that actually were spam we classified as spam.
It is the ratio of true positives (messages classified as spam that actually are spam) to all the messages that were actually spam. In other words, recall is the ratio of
`[True Positives/(True Positives + False Negatives)]`
For classification problems with skewed class distributions, like ours - for example, if we had 100 text messages and only 2 were spam while the other 98 weren't - accuracy by itself is not a very good metric. We could classify 90 messages as not spam (including the 2 that were spam, which would then be false negatives) and 10 as spam (all 10 of them false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the **F1 score**, which is the weighted average of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score.
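To make the skewed example above concrete, here is a small sketch with made-up labels for that 100-message scenario (2 spam messages, and a classifier that misses both and flags 10 non-spam messages as spam):
```
# Accuracy looks respectable while precision and recall expose the failure.
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

y_true = np.array([1] * 2 + [0] * 98)             # 2 spam, 98 not spam
y_pred = np.array([0] * 2 + [1] * 10 + [0] * 88)  # both spam missed, 10 false positives

print('Accuracy: ', accuracy_score(y_true, y_pred))   # 0.88
print('Precision:', precision_score(y_true, y_pred))  # 0.0 -- none of the flagged messages were spam
print('Recall:   ', recall_score(y_true, y_pred))     # 0.0 -- none of the spam was caught
print('F1:       ', f1_score(y_true, y_pred))         # 0.0 (sklearn warns and returns 0 here)
```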
We will be using all 4 of these metrics to make sure our model does well. For all 4 metrics whose values can range from 0 to 1, having a score as close to 1 as possible is a good indicator of how well our model is doing.
```
'''
Instructions:
Compute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions
you made earlier stored in the 'predictions' variable.
'''
'''
Solution
'''
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
```
### Step 7: Conclusion ###
One of the major advantages that Naive Bayes has over other classification algorithms is its ability to handle an extremely large number of features. In our case, each word is treated as a feature, and there are thousands of different words. It also performs well in the presence of irrelevant features and is relatively unaffected by them. The other major advantage is its relative simplicity. Naive Bayes works well right out of the box, and tuning its parameters is rarely necessary, except in cases where the distribution of the data is known.
It rarely overfits the data. Another important advantage is that its training and prediction times are very fast for the amount of data it can handle. All in all, Naive Bayes really is a gem of an algorithm!
Congratulations! You have successfully designed a model that can efficiently predict if an SMS message is spam or not!
Thank you for learning with us!
# Setup
### Installing Dependencies and Mounting
```
%%capture
!pip install transformers
# Mount Google Drive
from google.colab import drive # import drive from google colab
ROOT = "/content/drive"
drive.mount(ROOT, force_remount=True)
```
### Imports
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import random
import json
import time
import datetime
import os
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config, AdamW, get_linear_schedule_with_warmup
import torch
torch.manual_seed(64)
from torch.utils.data import Dataset, random_split, DataLoader, RandomSampler, SequentialSampler
!pip show torch
```
### Setting Device
```
%cd /content/drive/MyDrive/AutoCompose/
!nvidia-smi
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
```
# Data Preparation
### Data Collection
```
with open("data/anticipation.json", "r") as f:
data = json.load(f)
data = [poem for poem in data if len(poem["poem"].split()) < 100]
print(len(data))
data[:5]
```
### Data Model
```
class PoemDataset(Dataset):
def __init__(self, poems, tokenizer, max_length=768, gpt2_type="gpt2"):
self.tokenizer = tokenizer
self.input_ids = []
self.attn_masks = []
for poem in poems:
encodings_dict = tokenizer("<|startoftext|>"+poem["poem"]+"<|endoftext|>",
truncation=True,
max_length=max_length,
padding="max_length")
self.input_ids.append(torch.tensor(encodings_dict["input_ids"]))
self.attn_masks.append(torch.tensor(encodings_dict["attention_mask"]))
def __len__(self):
return len(self.input_ids)
def __getitem__(self, idx):
return self.input_ids[idx], self.attn_masks[idx]
# Loading GPT2 Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2',
bos_token='<|startoftext|>',
eos_token='<|endoftext|>',
pad_token='<|pad|>')
```
### Rough
```
print(tokenizer.encode("<|startoftext|> Hello World <|endoftext|>", padding="max_length", max_length=10))
print(len(tokenizer))
# Finding length of maximum token in dataset
max_length = max([len(tokenizer.encode(poem["poem"])) for poem in data])
print(max_length)
max_length = 100
x = [len(tokenizer.encode(poem["poem"])) for poem in data if len(tokenizer.encode(poem["poem"])) < 100]
y = [len(tokenizer.encode(poem["poem"])) - len(poem["poem"].split()) for poem in data]
print(sum(y)/len(y))
print(max(x), len(x))
plt.hist(x, bins = 5)
plt.show()
```
### Dataset Creation
```
batch_size = 32
max_length = 100
dataset = PoemDataset(data, tokenizer, max_length=max_length)
# Split data into train and validation sets
train_size = int(0.9*len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
print("Number of samples for training =", train_size)
print("Number of samples for validation =", val_size)
train_dataset[0]
train_dataloader = DataLoader(train_dataset,
sampler=RandomSampler(train_dataset),
batch_size=batch_size)
val_dataloader = DataLoader(val_dataset,
sampler=SequentialSampler(val_dataset),
batch_size=batch_size)
```
# Finetune GPT2 Language Model
### Importing Pre-Trained GPT2 Model
```
# Load model configuration
config = GPT2Config.from_pretrained("gpt2")
# Create model instance and set embedding length
model = GPT2LMHeadModel.from_pretrained("gpt2", config=config)
model.resize_token_embeddings(len(tokenizer))
# Running the model on GPU
model = model.to(device)
# <<< Optional >>>
# Setting seeds to enable reproducible runs
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
```
### Scheduling Optimizer
```
epochs = 4
warmup_steps = 1e2
sample_every = 100
print(len(train_dataloader))
print(len(train_dataset))
# Using AdamW optimizer with default parameters
optimizer = AdamW(model.parameters(), lr=5e-4, eps=1e-8)
# Total training steps is the number of batches per epoch times the number of epochs
total_training_steps = len(train_dataloader)*epochs
# Setting a variable learning rate using scheduler
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=total_training_steps)
```
### Training
```
def format_time(elapsed):
return str(datetime.timedelta(seconds=int(round(elapsed))))
total_t0 = time.time()
training_stats = []
model = model.to(device)
for epoch_i in range(epochs):
print(f'Beginning epoch {epoch_i+1} of {epochs}')
t0 = time.time()
total_train_loss = 0
model.train()
# For causal LM training the labels are the input ids; the model shifts them by one position internally
for step, batch in enumerate(train_dataloader):
b_input_ids = batch[0].to(device)
b_labels = batch[0].to(device)
b_masks = batch[1].to(device)
model.zero_grad()
outputs = model(b_input_ids,
labels=b_labels,
attention_mask=b_masks)
loss = outputs[0]
batch_loss = loss.item()
total_train_loss += batch_loss
# Sampling every x steps
if step != 0 and step % sample_every == 0:
elapsed = format_time(time.time()-t0)
print(f'Batch {step} of {len(train_dataloader)}. Loss: {batch_loss}. Time: {elapsed}')
model.eval()
sample_outputs = model.generate(
bos_token_id=random.randint(1,30000),
do_sample=True,
top_k=50,
max_length = 200,
top_p=0.95,
num_return_sequences=1
)
for i, sample_output in enumerate(sample_outputs):
print(f'Example output: {tokenizer.decode(sample_output, skip_special_tokens=True)}')
print()
model.train()
loss.backward()
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(train_dataloader)
training_time = format_time(time.time()-t0)
print(f'Average Training Loss: {avg_train_loss}. Epoch time: {training_time}')
print()
t0 = time.time()
model.eval()
total_eval_loss = 0
nb_eval_steps = 0
for batch in val_dataloader:
b_input_ids = batch[0].to(device)
b_labels = batch[0].to(device)
b_masks = batch[1].to(device)
with torch.no_grad():
outputs = model(b_input_ids,
attention_mask = b_masks,
labels=b_labels)
loss = outputs[0]
batch_loss = loss.item()
total_eval_loss += batch_loss
avg_val_loss = total_eval_loss / len(val_dataloader)
val_time = format_time(time.time() - t0)
print(f'Validation loss: {avg_val_loss}. Validation Time: {val_time}')
print()
# Record all statistics from this epoch.
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Training Time': training_time,
'Validation Time': val_time
}
)
print("------------------------------")
print(f'Total training took {format_time(time.time()-total_t0)}')
```
### Visualizations
```
pd.set_option('precision', 2)
df_stats = pd.DataFrame(data=training_stats)
df_stats = df_stats.set_index('epoch')
# Use plot styling from seaborn.
sns.set(style='darkgrid')
# Increase the plot size and font size.
sns.set(font_scale=1.5)
plt.rcParams["figure.figsize"] = (12,6)
# Plot the learning curve.
plt.plot(df_stats['Training Loss'], 'b-o', label="Training")
plt.plot(df_stats['Valid. Loss'], 'g-o', label="Validation")
# Label the plot.
plt.title("Training & Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.xticks([1, 2, 3, 4])
plt.show()
```
### Generate Poems
```
model.eval()
prompt = "<|startoftext|>"
generated = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)
generated = generated.to(device)
sample_outputs = model.generate(
generated,
do_sample=True,
top_k=50,
max_length = 300,
top_p=0.95,
num_return_sequences=3
)
for i, sample_output in enumerate(sample_outputs):
print("{}: {}\n\n".format(i, tokenizer.decode(sample_output, skip_special_tokens=True)))
```
### Saving and Loading Finetuned Model
```
output_dir = "/content/drive/My Drive/AutoCompose/models/anticipation2"
# Save generated poems
# sample_outputs = model.generate(
# generated,
# do_sample=True,
# top_k=50,
# max_length = 300,
# top_p=0.95,
# num_return_sequences=25
# )
# with open(os.path.join(output_dir, 'generated_poems.txt'), "w") as outfile:
# for i, sample_output in enumerate(sample_outputs):
# outfile.write(tokenizer.decode(sample_output, skip_special_tokens=True)+"\n\n")
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
# torch.save(training_stats, os.path.join(output_dir, 'training_args.bin'))
# Save generated poems
sample_outputs = model.generate(
generated,
do_sample=True,
top_k=50,
max_length = 300,
top_p=0.95,
num_return_sequences=25
)
with open(os.path.join(output_dir, 'generated_poems.txt'), "w") as outfile:
for i, sample_output in enumerate(sample_outputs):
outfile.write(tokenizer.decode(sample_output, skip_special_tokens=True)+"\n\n")
# Loading saved model
model_dir = "/content/drive/My Drive/AutoCompose/models/neutral"
model = GPT2LMHeadModel.from_pretrained(model_dir)
tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
model.to(device)
```
# Version Control
```
!git config --global user.email "[email protected]"
!git config --global user.name "prajwal"
import json
f = open("AutoComposeCreds.json")
data = json.load(f)
f.close()
print(data)
username="prajwalcr"
repository="AutoCompose"
git_token = data["git-token"]
!git clone https://{git_token}@github.com/{username}/{repository}
%cd /content/drive/MyDrive/AutoCompose/
!git pull
!git push
!git add .
!git commit -m "anger model trained on uni-m dataset added"
!git filter-branch --tree-filter 'rm -rf models/' HEAD
!git add .
!git status
!git commit -m "new models added"
```
## Define the Convolutional Neural Network
In this notebook and in `models.py`:
1. Define a CNN with images as input and keypoints as output
2. Construct the transformed FaceKeypointsDataset, just as before
3. Train the CNN on the training data, tracking loss
4. See how the trained model performs on test data
5. If necessary, modify the CNN structure and model hyperparameters, so that it performs *well* **\***
**\*** What does *well* mean?
"Well" means that the model's loss decreases during training **and**, when applied to test image data, the model produces keypoints that closely match the true keypoints of each face. And you'll see examples of this later in the notebook.
---
## CNN Architecture
Recall that CNNs are defined by a few types of layers:
* Convolutional layers
* Maxpooling layers
* Fully-connected layers
### Define model in the provided file `models.py` file
## PyTorch Neural Nets
To define a neural network in PyTorch, we define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor `x`. The structure of this Net class is shown below and left for you to fill in.
Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network.
#### Define the Layers in ` __init__`
As a reminder, a conv/pool layer may be defined like this (in `__init__`):
```
# 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel
self.conv1 = nn.Conv2d(1, 32, 3)
# maxpool that uses a square window of kernel_size=2, stride=2
self.pool = nn.MaxPool2d(2, 2)
```
#### Refer to Layers in `forward`
These layers are then referred to in the `forward` function like this, where the conv1 layer has a ReLU activation applied to it before maxpooling:
```
x = self.pool(F.relu(self.conv1(x)))
```
Best practice is to place any layers whose weights will change during the training process in `__init__` and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, should appear *only* in the `forward` function.
#### Why models.py
We are tasked with defining the network in the `models.py` file so that any models we define can be saved and loaded by name in different notebooks in this project directory. For example, by defining a CNN class called `Net` in `models.py`, we can then create that same architecture in this and other notebooks by simply importing the class and instantiating a model:
```
from models import Net
net = Net()
```
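As a rough reference, here is a minimal sketch of the kind of `Net` class `models.py` might contain — the number of layers and the layer sizes are illustrative assumptions for a 224x224 grayscale input, not the project's reference architecture:
```
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # trainable layers live in __init__
        self.conv1 = nn.Conv2d(1, 32, 3)    # 1 grayscale channel -> 32 feature maps
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.pool = nn.MaxPool2d(2, 2)
        # with a 224x224 input, two conv/pool stages leave 64 maps of 54x54
        self.fc1 = nn.Linear(64 * 54 * 54, 136)  # 68 keypoints * (x, y)

    def forward(self, x):
        # stateless activations stay in forward
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return x
```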
```
# load the data if you need to; if you have already loaded the data, you may comment this cell out
# -- DO NOT CHANGE THIS CELL -- #
!mkdir /data
!wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip
!unzip -n /data/train-test-data.zip -d /data
```
<div class="alert alert-info">**Note:** Workspaces automatically close connections after 30 minutes of inactivity (including inactivity while training!). Use the code snippet below to keep your workspace alive during training. (The active_session context manager is imported below.)
</div>
```
from workspace_utils import active_session
with active_session():
train_model(num_epochs)
```
```
# import the usual resources
import matplotlib.pyplot as plt
import numpy as np
# import utilities to keep workspaces alive during model training
from workspace_utils import active_session
# watch for any changes in model.py, if it changes, re-load it automatically
%load_ext autoreload
%autoreload 2
## Define the Net in models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
## Once you've define the network, you can instantiate it
# one example conv layer has been provided for you
from models import Net
net = Net()
print(net)
```
## Transform the dataset
To prepare for training, we have created a transformed dataset of images and keypoints.
### Define a data transform
In PyTorch, a convolutional neural network expects a torch image of a consistent size as input. For efficient training, and so our model's loss does not blow up during training, it is also suggested that we normalize the input images and keypoints. The necessary transforms have been defined in `data_load.py` and we **do not** need to modify these.
To define the data transform below, we have used a [composition](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#compose-transforms) of:
1. Rescaling and/or cropping the data, such that we are left with a square image (the suggested size is 224x224px)
2. Normalizing the images and keypoints; turning each RGB image into a grayscale image with a color range of [0, 1] and transforming the given keypoints into a range of [-1, 1]
3. Turning these images and keypoints into Tensors
**This transform will be applied to the training data and, later, the test data**. It will change how we go about displaying these images and keypoints, but these steps are essential for efficient training.
```
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# the dataset we created in Notebook 1 is copied in the helper file `data_load.py`
from data_load import FacialKeypointsDataset
# the transforms we defined in Notebook 1 are in the helper file `data_load.py`
from data_load import Rescale, RandomCrop, Normalize, ToTensor
## define the data_transform using transforms.Compose([all tx's, . , .])
# order matters! i.e. rescaling should come before a smaller crop
data_transform = transforms.Compose([Rescale(250),
RandomCrop(224),
Normalize(),
ToTensor()])
# testing that you've defined a transform
assert(data_transform is not None), 'Define a data_transform'
# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv',
root_dir='/data/training/',
transform=data_transform)
print('Number of images: ', len(transformed_dataset))
# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
sample = transformed_dataset[i]
print(i, sample['image'].size(), sample['keypoints'].size())
```
## Batching and loading data
Next, having defined the transformed dataset, we can use PyTorch's DataLoader class to load the training data in batches of whatever size as well as to shuffle the data for training the model. You can read more about the parameters of the DataLoader in [this documentation](http://pytorch.org/docs/master/data.html).
#### Batch size
Decide on a good batch size for training your model. Try both small and large batch sizes and note how the loss decreases as the model trains. Too large a batch size may cause your model to crash and/or run out of memory while training.
**Note for Windows users**: Please change the `num_workers` to 0 or you may face some issues with your DataLoader failing.
```
# load training data in batches
batch_size = 10
train_loader = DataLoader(transformed_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4)
```
## Before training
Take a look at how this model performs before it trains. You should see that the keypoints it predicts start off in one spot and don't match the keypoints on a face at all! It's interesting to visualize this behavior so that you can compare it to the model after training and see how the model has improved.
#### Load in the test dataset
The test dataset is one that this model has *not* seen before, meaning it has not trained with these images. We'll load in this test data and before and after training, see how our model performs on this set!
To visualize this test data, we have to go through some un-transformation steps to turn our images into python images from tensors and to turn our keypoints back into a recognizable range.
```
# load in the test data, using the dataset class
# AND apply the data_transform you defined above
# create the test dataset
test_dataset = FacialKeypointsDataset(csv_file='/data/test_frames_keypoints.csv',
root_dir='/data/test/',
transform=data_transform)
# load test data in batches
batch_size = 10
test_loader = DataLoader(test_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4)
```
## Apply the model on a test sample
To test the model on a test sample of data, we have to follow these steps:
1. Extract the image and ground truth keypoints from a sample
2. Wrap the image in a Variable, so that the net can process it as input and track how it changes as the image moves through the network.
3. Make sure the image is a FloatTensor, which the model expects.
4. Forward pass the image through the net to get the predicted, output keypoints.
This function tests how the network performs on the first batch of test data. It returns the transformed test images, the predicted keypoints (produced by the model), and the ground truth keypoints.
```
# test the model on a batch of test images
def net_sample_output():
# iterate through the test dataset
for i, sample in enumerate(test_loader):
# get sample data: images and ground truth keypoints
images = sample['image']
key_pts = sample['keypoints']
# convert images to FloatTensors
images = images.type(torch.FloatTensor)
# forward pass to get net output
output_pts = net(images)
# reshape to batch_size x 68 x 2 pts
output_pts = output_pts.view(output_pts.size()[0], 68, -1)
# break after first image is tested
if i == 0:
return images, output_pts, key_pts
```
#### Debugging tips
If you get a size or dimension error here, make sure that your network outputs the expected number of keypoints! Or if you get a Tensor type error, look into changing the above code that casts the data into float types: `images = images.type(torch.FloatTensor)`.
```
# call the above function
# returns: test images, test predicted keypoints, test ground truth keypoints
test_images, test_outputs, gt_pts = net_sample_output()
# print out the dimensions of the data to see if they make sense
print(test_images.data.size())
print(test_outputs.data.size())
print(gt_pts.size())
```
## Visualize the predicted keypoints
Once we've had the model produce some predicted output keypoints, we can visualize these points in a way that's similar to how we've displayed this data before, only this time, we have to "un-transform" the image/keypoint data to display it.
The *new* function `show_all_keypoints` displays a grayscale image, its predicted keypoints, and its ground truth keypoints (if provided).
```
def show_all_keypoints(image, predicted_key_pts, gt_pts=None):
"""Show image with predicted keypoints"""
# image is grayscale
plt.imshow(image, cmap='gray')
plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m')
# plot ground truth points as green pts
if gt_pts is not None:
plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g')
```
#### Un-transformation
Next, you'll see a helper function, `visualize_output`, that takes in a batch of images, predicted keypoints, and ground truth keypoints, and displays a set of those images and their true/predicted keypoints.
This function's main role is to take batches of image and keypoint data (the input and output of your CNN), and transform them into numpy images and un-normalized keypoints (x, y) for normal display. The un-transformation process turns keypoints and images into numpy arrays from Tensors *and* it undoes the keypoint normalization done in the Normalize() transform; it's assumed that you applied these transformations when you loaded your test data.
```
# visualize the output
# by default this shows a batch of 10 images
def visualize_output(test_images, test_outputs, gt_pts=None, batch_size=10):
for i in range(batch_size):
plt.figure(figsize=(20,10))
ax = plt.subplot(1, batch_size, i+1)
# un-transform the image data
image = test_images[i].data   # get the image from its Variable wrapper
image = image.numpy() # convert to numpy array from a Tensor
image = np.transpose(image, (1, 2, 0)) # transpose to go from torch to numpy image
# un-transform the predicted key_pts data
predicted_key_pts = test_outputs[i].data
predicted_key_pts = predicted_key_pts.numpy()
# undo normalization of keypoints
predicted_key_pts = predicted_key_pts*50.0+100
# plot ground truth points for comparison, if they exist
ground_truth_pts = None
if gt_pts is not None:
ground_truth_pts = gt_pts[i]
ground_truth_pts = ground_truth_pts*50.0+100
# call show_all_keypoints
show_all_keypoints(np.squeeze(image), predicted_key_pts, ground_truth_pts)
plt.axis('off')
plt.show()
# call it
visualize_output(test_images, test_outputs, gt_pts)
```
## Training
#### Loss function
Training a network to predict keypoints is different than training a network to predict a class; instead of outputting a distribution of classes and using cross entropy loss, we have to choose a loss function that is suited for regression, which directly compares a predicted value and target value. Read about the various kinds of loss functions (like MSE or L1/SmoothL1 loss) in [this documentation](http://pytorch.org/docs/master/_modules/torch/nn/modules/loss.html).
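The training cells below use `MSELoss`; as a small illustration of the alternatives mentioned above, `SmoothL1Loss` is a drop-in replacement that penalizes large keypoint errors linearly rather than quadratically — whether it helps here is something to experiment with, not a claim from this project:
```
import torch
import torch.nn as nn

mse = nn.MSELoss()
smooth_l1 = nn.SmoothL1Loss()

pred = torch.tensor([[0.0, 2.0]])    # one coordinate off by 2
target = torch.tensor([[0.0, 0.0]])

print(mse(pred, target))        # tensor(2.)   -> (0 + 2**2) / 2
print(smooth_l1(pred, target))  # tensor(0.75) -> (0 + (2 - 0.5)) / 2; large errors grow linearly
```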
### Define the loss and optimization
Next, we will define how the model will train by deciding on the loss function and optimizer.
---
```
## Define the loss and optimization
import torch.optim as optim
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr = 0.001)
```
## Training and Initial Observation
Now, we will train on our batched training data from `train_loader` for a number of epochs.
```
def train_net(n_epochs):
# prepare the net for training
net.train()
training_loss = []
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
epoch_loss = 0.0  # accumulates the loss over the whole epoch
# train on batches of data, assumes you already have train_loader
for batch_i, data in enumerate(train_loader):
# get the input images and their corresponding labels
images = data['image']
key_pts = data['keypoints']
# flatten pts
key_pts = key_pts.view(key_pts.size(0), -1)
# convert variables to floats for regression loss
key_pts = key_pts.type(torch.FloatTensor)
images = images.type(torch.FloatTensor)
# forward pass to get outputs
output_pts = net(images)
# calculate the loss between predicted and target keypoints
loss = criterion(output_pts, key_pts)
# zero the parameter (weight) gradients
optimizer.zero_grad()
# backward pass to calculate the weight gradients
loss.backward()
# update the weights
optimizer.step()
# print loss statistics
running_loss += loss.item()
epoch_loss += loss.item()
if batch_i % 10 == 9: # print every 10 batches
print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, running_loss/10))
running_loss = 0.0
# record the average loss over the full epoch (the 10-batch running_loss above is only for printing)
training_loss.append(epoch_loss/len(train_loader))
print('Finished Training')
return training_loss
# train your network
n_epochs = 10 # start small, and increase when you've decided on your model structure and hyperparams
# this is a Workspaces-specific context manager to keep the connection
# alive while training your model, not part of pytorch
with active_session():
training_loss = train_net(n_epochs)
# visualize the loss as the network trained
plt.figure()
plt.semilogy(training_loss)
plt.grid()
plt.xlabel('Epoch')
plt.ylabel('Loss');
```
## Test data
See how the model performs on previously unseen, test data. We've already loaded and transformed this data, similar to the training data. Next, run the trained model on these images to see what kind of keypoints are produced.
```
# get a sample of test data again
test_images, test_outputs, gt_pts = net_sample_output()
print(test_images.data.size())
print(test_outputs.data.size())
print(gt_pts.size())
## visualize test output
# you can use the same function as before, by un-commenting the line below:
visualize_output(test_images, test_outputs, gt_pts)
```
Once we have found a good model (or two), we have to save the model so we can load it and use it later!
```
## change the name to something unique for each new model
model_dir = 'saved_models/'
model_name = 'facial_keypoints_model.pt'
# after training, save your model parameters in the dir 'saved_models'
torch.save(net.state_dict(), model_dir+model_name)
```
## Feature Visualization
Sometimes, neural networks are thought of as a black box: given some input, they learn to produce some output. CNNs are actually learning to recognize a variety of spatial patterns, and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying them one at a time to a sample image. This technique is called feature visualization, and it's useful for understanding the inner workings of a CNN.
In the cell below, you can see how to extract a single filter (by index) from your first convolutional layer. The filter should appear as a grayscale grid.
```
# Get the weights in the first conv layer, "conv1"
# if necessary, change this to reflect the name of your first conv layer
weights1 = net.conv1.weight.data
w = weights1.numpy()
filter_index = 0
print(w[filter_index][0])
print(w[filter_index][0].shape)
# display the filter weights
plt.imshow(w[filter_index][0], cmap='gray')
```
## Feature maps
Each CNN has at least one convolutional layer that is composed of stacked filters (also known as convolutional kernels). As a CNN trains, it learns what weights to include in its convolutional kernels, and when these kernels are applied to some input image, they produce a set of **feature maps**. So, feature maps are just sets of filtered images; they are the images produced by applying a convolutional kernel to an input image. These maps show us the features that the different layers of the neural network learn to extract. For example, you might imagine a convolutional kernel that detects the vertical edges of a face or another one that detects the corners of eyes. You can see what kind of features each of these kernels detects by applying them to an image. One such example is shown below; from the way it brings out the lines in the image, you might characterize this as an edge detection filter.
<img src='images/feature_map_ex.png' width=50% height=50%/>
Next, choose a test image and filter it with one of the convolutional kernels in your trained CNN; look at the filtered output to get an idea what that particular kernel detects.
### Filter an image to see the effect of a convolutional kernel
---
```
## load in and display any image from the transformed test dataset
import cv2
image = cv2.imread('images/mona_lisa.jpg')
# convert image to grayscale
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) / 255.0  # cv2.imread loads images in BGR order
## Using cv's filter2D function
filter_kernel = np.array([[ 0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]])
filtered_image = cv2.filter2D(image, -1, filter_kernel)
f, (ax1, ax2, ax3) = plt.subplots(ncols=3, nrows=1, figsize=(10, 5))
ax1.imshow(filter_kernel, cmap='gray')
ax2.imshow(image, cmap='gray')
ax3.imshow(filtered_image, cmap='gray')
ax1.set_title('Kernel')
ax2.set_title('Original Image')
ax3.set_title('Filtered image')
plt.tight_layout();
## apply a specific set of filter weights (like the one displayed above) to the test image
weights = net.conv1.weight.data.numpy()
filter_kernel = weights[filter_index][0]
filtered_image = cv2.filter2D(image, -1, filter_kernel)
f, (ax1, ax2, ax3) = plt.subplots(ncols=3, nrows=1, figsize=(10, 5))
ax1.imshow(filter_kernel, cmap='gray')
ax2.imshow(image, cmap='gray')
ax3.imshow(filtered_image, cmap='gray')
ax1.set_title('Kernel')
ax2.set_title('Original Image')
ax3.set_title('Filtered image')
plt.tight_layout();
```
---
## Moving on!
Now that we have defined and trained the model (and saved the best model), we are ready to move on to the last notebook, which combines a face detector with your saved model to create a facial keypoint detection system that can predict the keypoints on *any* face in an image!
```
import numpy as np
from scipy.optimize import least_squares
#from pandas import Series, DataFrame
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Qt5Agg')
%matplotlib qt5
#
# if rac_aux.py is not in the current directory, set this path:
#
#import sys
#sys.path.append('../Python_libs')
from rac_aux import *
Angs2Bohr=1.8897259886
au2eV=27.211386027
au2cm=219474.63068
#
# files in the current directory do not need the path name
#
#df = pd.read_csv("/home/thomas/Python/StabPlots/Stab_data/1D_a0.2_b0_c0.14/crossing_1.dat", delim_whitespace=True)
df = pd.read_csv("sb_rac.csv")
#df = pd.read_csv("crossing_1.dat", delim_whitespace=True)
plot_it=False
if plot_it:
plt.cla()
plt.plot(df.l.values, df.E1.values, 'o-')
plt.plot(df.l.values, df.E2.values, 'o-')
plt.plot(df.l.values, df.E3.values, 'o-')
plt.show()
df[:5]
#
# put all negative E(lambda) points into the vectors: ls and Es
#
i_neg = np.argmin(abs(df.E1.values))
if df.E1[i_neg] > 0:
i_neg += 1
ls = df.l.values[i_neg:]
print('N=',len(ls))
Es = df.E1.values[i_neg:]
if plot_it:
plt.cla()
plt.plot(df.l.values, df.E1.values, 'b-')
plt.plot(df.l.values, df.E2.values, 'b-')
plt.plot(df.l.values, df.E3.values, 'b-')
plt.plot(ls, Es, 'o', color="orange")
plt.show()
#
# So far, nm can be in [21, 31, 32, 42, 53]
#
nm=32
fun=pade_32_lsq
jac=pade_32j_lsq
#
# kappas, kappa**2, and sigmas (weights = sigma**2)
# least_squares() passes parg to each pade_nm function
#
k2s = -Es
ks = np.sqrt(k2s)
sigmas = weights(len(Es), 'ones')
#sigmas = weights(len(Es), 'energy', E0=Es[11], Es=Es)
parg=(ks,k2s,ls,sigmas)
# start params depend on nm
p31_opt = [2.4022, 0.2713, 1.2813, 0.4543]
p42_opt = [2.3919, 0.2964, 1.3187, 1.3736, 0.29655, 0.5078]
E0 = linear_extra(ls,Es)
G0 = 0.2*E0
if nm == 21:
p0s=[ls[0]] + guess(E0, G0)
elif nm == 31:
p0s=[ls[0]] + guess(E0, G0) + [10]
elif nm == 32:
p0s=[ls[0]] + guess(E0, G0) + [10] + [1]
elif nm == 42:
p0s=[ls[0]] + guess(E0, G0) + guess(5*E0,10*G0) + [10]
elif nm == 53:
p0s = p42_opt[0:5] + p31_opt[3:] + p42_opt[5:] + [1]
else:
print("Warning", nm, "not implemented")
print(p0s)
print(chi2_gen(p0s, ks, k2s, ls, sigmas, fun))
#
# test the derivative of [n,m]
#
N=6
df1s = pade_gen_j_lsq(p0s, ks[-N:], k2s[-N:], ls[-N:], sigmas[-N:], fun)
print("num grad:\n", df1s)
df2s = jac(p0s, ks[-N:], k2s[-N:], ls[-N:], sigmas[-N:])
print("ana grad:\n", df2s)
np.sqrt(np.sum(np.square(df1s-df2s)))
print('Least squares, trust-region-reflective (default) with 2-point jac')
res = least_squares(fun, p0s, method='trf', jac='2-point', args=parg)
print("njev:",res.njev)
print("cost:",res.cost)
print("grad:",res.grad)
print("message:",res.message)
print("success:",res.success)
print("x:", res.x)
print('chi2 = %.3e' % (res.cost*2))
print("Er=%f, Gamma=%f" % res_ene(res.x[1], res.x[2]))
print('Least squares, trust-region-reflective (default) with analytic jac')
res = least_squares(fun, p0s, method='trf', jac=jac, args=parg)
print("njev:",res.njev)
print("cost:",res.cost)
print("grad:",res.grad)
print("message:",res.message)
print("success:",res.success)
print("x:", res.x)
print('chi2 = %.3e' % (res.cost*2))
print("Er=%f, Gamma=%f" % res_ene(res.x[1], res.x[2]))
print('Least squares, Levenberg-Marquardt with analytic jac')
res = least_squares(fun, p0s, method='lm', jac=jac, args=parg)
print("njev:",res.njev)
print("cost:",res.cost)
print("grad:",res.grad)
print("message:",res.message)
print("success:",res.success)
print("x:", res.x)
print('chi2 = %.3e' % (res.cost*2))
print("Er=%f, Gamma=%f" % res_ene(res.x[1], res.x[2]))
print('Least squares, TRF with bounds')
#
# bnds depend on the number of parameters
#
npara=len(p0s)
zs = np.zeros(npara)
infs = np.full(npara, np.inf)
bnds=(zs, infs)
res = least_squares(fun, p0s, jac=jac, bounds=bnds, args=parg)
print("njev:",res.njev)
print("cost:",res.cost)
print("grad:",res.grad)
print("message:",res.message)
print("success:",res.success)
print("x:", res.x)
print('chi2 = %.3e' % (res.cost*2))
print("Er=%f, Gamma=%f" % res_ene(res.x[1], res.x[2]))
#
# sweep the energy filter
#
M=len(Es)
sigmas = weights(M, 'ones')
res = least_squares(fun, p0s, method='trf', jac=jac, args=(ks, k2s, ls, sigmas))
Er, G = res_ene(res.x[1], res.x[2])
print('All weights equal: chi2 = %.3e Er=%f, Gamma=%f' % (res.cost*2, Er, G))
M=len(Es)
for n in [0, M//4, M//2, 3*M//4, M-1]:
sigmas = weights(M, 'energy', E0=Es[n], Es=Es)
res = least_squares(fun, p0s, method='trf', jac=jac, args=(ks, k2s, ls, sigmas))
Er, G = res_ene(res.x[1], res.x[2])
print('Filter E = %6.2f: chi2 = %.3e Er=%f, Gamma=%f' % (Es[n], res.cost*2, Er, G))
```
# Project 4: Neural Networks Project
All code was compiled and run in Google Colab, as neural models take time to train and the university laptops do not have enough processing power to run them.
##### All comments and conclusions have been added right below each code block for easier analysis and understanding
<a href="https://colab.research.google.com/github/adithyarganesh/CSC591_004_Neural_Nets/blob/main/Final_NN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Task 1. Automatic grid search
### Libraries
Key libraries used are keras and scikit-learn
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from keras.wrappers.scikit_learn import KerasRegressor
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
data = pd.read_csv("20.csv", header = None)
data.head()
data.corr()
```
From the correlation values computed for the dataset, we notice that the target is much more strongly correlated with the first column than with the rest.
Splitting the data into train and test with a 2000 - 300 split
```
dataset = data.values
X = dataset[:,0:5]
Y = dataset[:,5]
X_test = X[-300:]
X = X[:-300]
Y_test = Y[-300:]
Y = Y[:-300]
```
First, I ran a baseline model to see what MSE it produces, since this gives a reference point for how much the results improve with further modification and hyperparameter tuning.
```
# define base model
def baseline():
model = Sequential()
model.add(Dense(5, input_dim=5, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
estimator = KerasRegressor(build_fn=baseline, epochs=100, batch_size=5, verbose=0)
kfold = KFold(n_splits=10)
results = cross_val_score(estimator, X, Y, cv=kfold)
print("Baseline: %.2f (%.2f) MSE" % (results.mean(), results.std()))
```
As seen above, the simple multilayer perceptron regressor produces a very high MSE. This suggests that proper hyperparameter tuning is required, along with modifications to other parameters such as the learning rate, dropout, and number of epochs.
I first decided to nail down the ideal optimizer, and only then tweak the other major parameters, as it takes hours to try every combination.
From a grid over optimizers, epochs, and batch sizes, I concluded that the Adam optimizer is the best fit for this dataset.
The MSE values for each combination from that grid search are listed below.
Best: -25979.201172 using {'batch_size': 20, 'epochs': 100, 'optimizer': 'adam'}
-83848.133594 (31485.334665) with: {'batch_size': 10, 'epochs': 10, 'optimizer': 'adam'}
-124149.147656 (106041.321994) with: {'batch_size': 10, 'epochs': 10, 'optimizer': 'RMSprop'}
-17538629.000000 (6145950.034129) with: {'batch_size': 10, 'epochs': 10, 'optimizer': 'Adagrad'}
-28976.654297 (6686.122457) with: {'batch_size': 10, 'epochs': 50, 'optimizer': 'adam'}
-28985.950000 (4135.118675) with: {'batch_size': 10, 'epochs': 50, 'optimizer': 'RMSprop'}
-1475655.350000 (144409.180757) with: {'batch_size': 10, 'epochs': 50, 'optimizer': 'Adagrad'}
-31307.830078 (7195.229146) with: {'batch_size': 10, 'epochs': 100, 'optimizer': 'adam'}
-35668.427344 (12147.983446) with: {'batch_size': 10, 'epochs': 100, 'optimizer': 'RMSprop'}
-1435397.200000 (173003.982770) with: {'batch_size': 10, 'epochs': 100, 'optimizer': 'Adagrad'}
-607021.156250 (225326.076199) with: {'batch_size': 20, 'epochs': 10, 'optimizer': 'adam'}
-155434.096875 (67205.428782) with: {'batch_size': 20, 'epochs': 10, 'optimizer': 'RMSprop'}
-39172515.600000 (7229904.980792) with: {'batch_size': 20, 'epochs': 10, 'optimizer': 'Adagrad'}
-32730.587109 (9326.100937) with: {'batch_size': 20, 'epochs': 50, 'optimizer': 'adam'}
-46073.637109 (17537.055165) with: {'batch_size': 20, 'epochs': 50, 'optimizer': 'RMSprop'}
-1622539.675000 (233324.938891) with: {'batch_size': 20, 'epochs': 50, 'optimizer': 'Adagrad'}
-25979.201172 (3285.793231) with: {'batch_size': 20, 'epochs': 100, 'optimizer': 'adam'}
-44877.579688 (7302.797490) with: {'batch_size': 20, 'epochs': 100, 'optimizer': 'RMSprop'}
-1489904.750000 (215725.142852) with: {'batch_size': 20, 'epochs': 100, 'optimizer': 'Adagrad'}
-1350494.175000 (162489.428364) with: {'batch_size': 40, 'epochs': 10, 'optimizer': 'adam'}
-742374.950000 (163049.310736) with: {'batch_size': 40, 'epochs': 10, 'optimizer': 'RMSprop'}
-56523900.000000 (2665037.687018) with: {'batch_size': 40, 'epochs': 10, 'optimizer': 'Adagrad'}
-56658.258203 (24003.537579) with: {'batch_size': 40, 'epochs': 50, 'optimizer': 'adam'}
-64086.296094 (12042.358310) with: {'batch_size': 40, 'epochs': 50, 'optimizer': 'RMSprop'}
-9372795.800000 (5108249.641949) with: {'batch_size': 40, 'epochs': 50, 'optimizer': 'Adagrad'}
-30622.471875 (6322.287248) with: {'batch_size': 40, 'epochs': 100, 'optimizer': 'adam'}
-36232.569531 (13259.656484) with: {'batch_size': 40, 'epochs': 100, 'optimizer': 'RMSprop'}
-1600181.925000 (146014.239422) with: {'batch_size': 40, 'epochs': 100, 'optimizer': 'Adagrad'}
-1390699.350000 (145640.273592) with: {'batch_size': 60, 'epochs': 10, 'optimizer': 'adam'}
-1082542.925000 (144731.452078) with: {'batch_size': 60, 'epochs': 10, 'optimizer': 'RMSprop'}
-62656396.800000 (559420.032519) with: {'batch_size': 60, 'epochs': 10, 'optimizer': 'Adagrad'}
-69710.080469 (40863.769851) with: {'batch_size': 60, 'epochs': 50, 'optimizer': 'adam'}
-71970.824219 (24058.956433) with: {'batch_size': 60, 'epochs': 50, 'optimizer': 'RMSprop'}
-16491987.400000 (3500092.027003) with: {'batch_size': 60, 'epochs': 50, 'optimizer': 'Adagrad'}
-46966.215625 (15952.838801) with: {'batch_size': 60, 'epochs': 100, 'optimizer': 'adam'}
-45104.332812 (10972.408712) with: {'batch_size': 60, 'epochs': 100, 'optimizer': 'RMSprop'}
-2788073.200000 (698682.820182) with: {'batch_size': 60, 'epochs': 100, 'optimizer': 'Adagrad'}
-1493044.875000 (155697.516601) with: {'batch_size': 80, 'epochs': 10, 'optimizer': 'adam'}
-1351079.800000 (130707.791587) with: {'batch_size': 80, 'epochs': 10, 'optimizer': 'RMSprop'}
-65509906.400000 (3248526.947553) with: {'batch_size': 80, 'epochs': 10, 'optimizer': 'Adagrad'}
-263853.200000 (123436.623595) with: {'batch_size': 80, 'epochs': 50, 'optimizer': 'adam'}
-92486.471875 (25669.353331) with: {'batch_size': 80, 'epochs': 50, 'optimizer': 'RMSprop'}
-25053901.200000 (2766136.455614) with: {'batch_size': 80, 'epochs': 50, 'optimizer': 'Adagrad'}
-41316.805469 (6963.559710) with: {'batch_size': 80, 'epochs': 100, 'optimizer': 'adam'}
-47747.921094 (15393.723483) with: {'batch_size': 80, 'epochs': 100, 'optimizer': 'RMSprop'}
-6449660.600000 (3418975.118244) with: {'batch_size': 80, 'epochs': 100, 'optimizer': 'Adagrad'}
-1476760.825000 (167679.598081) with: {'batch_size': 100, 'epochs': 10, 'optimizer': 'adam'}
-1404041.825000 (201396.916914) with: {'batch_size': 100, 'epochs': 10, 'optimizer': 'RMSprop'}
-72146363.200000 (2264547.064452) with: {'batch_size': 100, 'epochs': 10, 'optimizer': 'Adagrad'}
-352332.837500 (104710.011595) with: {'batch_size': 100, 'epochs': 50, 'optimizer': 'adam'}
-90365.727344 (25890.780854) with: {'batch_size': 100, 'epochs': 50, 'optimizer': 'RMSprop'}
-32726843.600000 (8646951.726295) with: {'batch_size': 100, 'epochs': 50, 'optimizer': 'Adagrad'}
-42565.274219 (14731.363104) with: {'batch_size': 100, 'epochs': 100, 'optimizer': 'adam'}
-65972.997656 (29357.657998) with: {'batch_size': 100, 'epochs': 100, 'optimizer': 'RMSprop'}
-11417867.800000 (2452926.970178) with: {'batch_size': 100, 'epochs': 100, 'optimizer': 'Adagrad'}
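The cell that produced the first-stage results above was not kept in the notebook; a hedged reconstruction (the build function and the way `optimizer` is passed through `KerasRegressor` are my assumptions about how it was run) would look roughly like this:
```
# Reconstruction sketch of the first-stage grid search over optimizer, batch size, and epochs.
def optimizer_model(optimizer='adam'):
    model = Sequential()
    model.add(Dense(128, input_dim=X.shape[1], activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mse'])
    return model

stage1 = KerasRegressor(build_fn=optimizer_model, verbose=0)
stage1_grid = dict(batch_size=[10, 20, 40, 60, 80, 100],
                   epochs=[10, 50, 100],
                   optimizer=['adam', 'RMSprop', 'Adagrad'])
stage1_search = GridSearchCV(estimator=stage1, param_grid=stage1_grid, n_jobs=-1)
# stage1_result = stage1_search.fit(X, Y)  # long-running; the reported scores are negative MSE
```
The second-stage search below then fixes Adam as the optimizer and tunes the learning rate, dropout rate, batch size, and number of epochs.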
```
def custom_model(momentum=0, dropout_rate=0.0, learn_rate=0.01, epochs=10, verbose=0):
model = Sequential()
model.add(Dense(128, input_dim=X.shape[1], activation='relu'))
model.add(Dropout(dropout_rate))  # dropout rate supplied by the grid search
model.add(Dense(64, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(1))
# use the learning rate supplied by the grid search instead of a hard-coded value
adam = Adam(lr=learn_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='mean_squared_error', optimizer=adam, metrics=['mse'])
return model
np.random.seed(5)
model = KerasRegressor(build_fn=custom_model, verbose=0)
# Hyperparameter tuning
learn_rate = [0.0001, 0.001, 0.01]
dropout_rate = [0.0, 0.2, 0.3]
batch_size = [10, 50, 100]
epochs = [10, 50, 100]
param_grid = dict(batch_size=batch_size, epochs=epochs, learn_rate=learn_rate, dropout_rate=dropout_rate)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
grid_result = grid.fit(X, Y)
```
I then created a model with two dense layers and used the Adam optimizer to perform the remaining hyperparameter tuning. These were the outputs that were obtained:
```
print("Best mse is %f with params --> %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
std_dev = grid_result.cv_results_['std_test_score']
tuned_params = grid_result.cv_results_['params' ]
for mean, stdev, param in zip(means, std_dev, tuned_params):
print("%f, %f ----> %r" % (mean, stdev, param))
```
From the above values, we notice that the optimal set of hyperparameters was found to be:
#### 'batch_size': 10, 'dropout_rate': 0.2, 'epochs': 100, 'learn_rate': 0.01
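As a final step, here is a minimal sketch of refitting a single regressor with those selected values — this assumes `custom_model`, `X`, `Y`, `X_test`, and `Y_test` from the cells above and was not a cell in the original run:
```
# Refit one regressor with the hyperparameters selected by the grid search.
final_model = KerasRegressor(build_fn=custom_model,
                             batch_size=10, epochs=100,
                             dropout_rate=0.2, learn_rate=0.01,
                             verbose=0)
final_model.fit(X, Y)
test_pred = final_model.predict(X_test)
print(np.mean((Y_test - test_pred) ** 2))  # held-out MSE for the tuned configuration
```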
## Task 2 - Compare the trained neural network with multivariable regression
```
X2 = sm.add_constant(X)
est = sm.OLS(Y, X2)
est2 = est.fit()
print(est2.summary())
reg2 = LinearRegression()
reg2.fit(X, Y)
print("The linear model is: Y = {:.5} + {:.5}*X1 + {:.5}*X2 + {:.5}*X3 + {:.5}*X4 + {:.5}*X5".format(reg2.intercept_, reg2.coef_[0], reg2.coef_[1], reg2.coef_[2], reg2.coef_[3], reg2.coef_[4]))
print("Y = a0 + a1X1 + a3X3 + a4X4 + a5X5")
```
We now calculate the sum of squared errors (SSE) for each of the models and determine which is the better model
```
LR_sse = 0
for v in Y - reg2.predict(X):
LR_sse += v**2
NN_sse = 0
for v in Y - grid_result.predict(X):
NN_sse += v**2
print("SSE for Multivariate regression: ", LR_sse)
print("SSE for estimation with Neural Moedl: ", NN_sse)
```
It can be seen that the custom neural model built with hyperparameter tuning achieves a lower SSE than the multivariable linear regression.
Below are two sample predictions made on unseen test data by both models. At first glance the difference is minimal, but with further hyperparameter tuning we see a much bigger difference in performance between the two models.
```
Y_test_pred = reg2.predict(X_test)
plt.plot(Y_test_pred[:50])
plt.plot(Y_test[:50])
Y_test_pred_NN = grid_result.predict(X_test)
plt.plot(Y_test_pred_NN[:50])
plt.plot(Y_test[:50])
```
## Conclusions
We notice that hyperparameter tuning is important: with proper analysis and choice of parameters, a neural model can outperform the multivariable regression model run earlier.
Refs:
https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/
https://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/
https://www.kaggle.com/willkoehrsen/intro-to-model-tuning-grid-and-random-search
## Dependencies
```
import json, warnings, shutil
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
```
# Load data
```
database_base_path = '/kaggle/input/tweet-dataset-split-roberta-base-96/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
display(k_fold.head())
# Unzip files
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_1.tar.gz
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_2.tar.gz
!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_3.tar.gz
# !tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_4.tar.gz
# !tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_5.tar.gz
```
# Model parameters
```
vocab_path = database_base_path + 'vocab.json'
merges_path = database_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
config = {
"MAX_LEN": 96,
"BATCH_SIZE": 32,
"EPOCHS": 5,
"LEARNING_RATE": 3e-5,
"ES_PATIENCE": 1,
"question_size": 4,
"N_FOLDS": 1,
"base_model_path": base_path + 'roberta-base-tf_model.h5',
"config_path": base_path + 'roberta-base-config.json'
}
with open('config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
last_state = sequence_output[0]
x_start = layers.Conv1D(1, 1)(last_state)
x_start = layers.Flatten()(x_start)
y_start = layers.Activation('softmax', name='y_start')(x_start)
x_end = layers.Conv1D(1, 1)(last_state)
x_end = layers.Flatten()(x_end)
y_end = layers.Activation('softmax', name='y_end')(x_end)
model = Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end])
model.compile(optimizers.Adam(lr=config['LEARNING_RATE']),
loss=losses.CategoricalCrossentropy(),
metrics=[metrics.CategoricalAccuracy()])
return model
```
# Tokenizer
```
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path, lowercase=True, add_prefix_space=True)
tokenizer.save('./')
```
# Train
```
history_list = []
AUTO = tf.data.experimental.AUTOTUNE
for n_fold in range(config['N_FOLDS']):
n_fold +=1
print('\nFOLD: %d' % (n_fold))
# Load data
base_data_path = 'fold_%d/' % (n_fold)
x_train = np.load(base_data_path + 'x_train.npy')
y_train = np.load(base_data_path + 'y_train.npy')
x_valid = np.load(base_data_path + 'x_valid.npy')
y_valid = np.load(base_data_path + 'y_valid.npy')
### Delete data dir
shutil.rmtree(base_data_path)
# Train model
model_path = 'model_fold_%d.h5' % (n_fold)
model = model_fn(config['MAX_LEN'])
es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'],
restore_best_weights=True, verbose=1)
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min',
save_best_only=True, save_weights_only=True)
history = model.fit(list(x_train), list(y_train),
validation_data=(list(x_valid), list(y_valid)),
batch_size=config['BATCH_SIZE'],
callbacks=[checkpoint, es],
epochs=config['EPOCHS'],
verbose=2).history
history_list.append(history)
# Make predictions
train_preds = model.predict(list(x_train))
valid_preds = model.predict(list(x_valid))
k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'train', 'start_fold_%d' % (n_fold)] = train_preds[0].argmax(axis=-1)
k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'train', 'end_fold_%d' % (n_fold)] = train_preds[1].argmax(axis=-1)
k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'validation', 'start_fold_%d' % (n_fold)] = valid_preds[0].argmax(axis=-1)
k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'validation', 'end_fold_%d' % (n_fold)] = valid_preds[1].argmax(axis=-1)
k_fold['end_fold_%d' % (n_fold)] = k_fold['end_fold_%d' % (n_fold)].astype(int)
k_fold['start_fold_%d' % (n_fold)] = k_fold['start_fold_%d' % (n_fold)].astype(int)
k_fold['end_fold_%d' % (n_fold)].clip(0, k_fold['text_len'], inplace=True)
k_fold['start_fold_%d' % (n_fold)].clip(0, k_fold['end_fold_%d' % (n_fold)], inplace=True)
k_fold['prediction_fold_%d' % (n_fold)] = k_fold.apply(lambda x: decode(x['start_fold_%d' % (n_fold)], x['end_fold_%d' % (n_fold)], x['text'], config['question_size'], tokenizer), axis=1)
k_fold['prediction_fold_%d' % (n_fold)].fillna('', inplace=True)
k_fold['jaccard_fold_%d' % (n_fold)] = k_fold.apply(lambda x: jaccard(x['text'], x['prediction_fold_%d' % (n_fold)]), axis=1)
```
# Model loss graph
```
sns.set(style="whitegrid")
for n_fold in range(config['N_FOLDS']):
print('Fold: %d' % (n_fold+1))
plot_metrics(history_list[n_fold])
```
# Model evaluation
```
display(evaluate_model_kfold(k_fold, config['N_FOLDS']).style.applymap(color_map))
```
# Visualize predictions
```
display(k_fold[[c for c in k_fold.columns if not (c.startswith('textID') or
c.startswith('text_len') or
c.startswith('selected_text_len') or
c.startswith('text_wordCnt') or
c.startswith('selected_text_wordCnt') or
c.startswith('fold_') or
c.startswith('start_fold_') or
c.startswith('end_fold_'))]].head(15))
```
<a href="https://colab.research.google.com/github/kalz2q/mycolabnotebooks/blob/master/learnelixir.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Notes
Taking a first bite of Elixir.
My current mental image $\quad$ Erlang is a system built for concurrent processing, and Elixir is what you get when you try to build an ideal language on top of it: something like Ruby + Clojure.
Dave Thomas and Yukihiro Matsumoto both recommend it, so it is probably a good language.
* https://elixirschool.com/ja/lessons/basics/control-structures/
* https://magazine.rubyist.net/articles/0054/0054-ElixirBook.
* https://dev.to/gumi/elixir-01--2585
* https://elixir-lang.org/getting-started/introduction.html
---
I bought the book:
"Programming Elixir |> 1.6"
Dave Thomas (Japanese translation by Koichi Sasada and Yuki Torii, Ohmsha)
and will read it.
```
%%capture
!wget https://packages.erlang-solutions.com/erlang-solutions_2.0_all.deb && sudo dpkg -i erlang-solutions_2.0_all.deb
!sudo apt update
!sudo apt install elixir
!elixir -v
!date
```
---
Note
Running `!elixir -h` (help) showed that shell one-liners are possible with `elixir -e`.
`iex` is the interactive environment, but it is awkward to use on Colab, so `elixir -e` is used instead.
```
!elixir -e 'IO.puts 3 + 3'
!elixir -e 'IO.puts "hello world!"'
# 次のようにすればファイルが作れる
%%writefile temp.exs
IO.puts "this is a pen."
# cat してみる
!cat temp.exs
# ファイルを elixir で実行する
!elixir temp.exs
```
---
How is the code in the next cell (found online) supposed to be run? It is fine not to understand it yet; it is copied here for reference.
Explanation:
This program defines a function pmap in a module called Parallel. It maps over the given collection (think of Ruby's Enumerable#map), but it spawns one process per element and runs each element's work concurrently in its own process. It may not make much sense at a glance, but the book is said to make it clear.
So the explanation goes.
```
%%writefile temp.exs
defmodule Parallel do
def pmap(collection, func) do
collection
|> Enum.map(&(Task.async(fn -> func.(&1) end)))
|> Enum.map(&Task.await/1)
end
end
result = Parallel.pmap 1..1000, &(&1 * &1)
IO.inspect result
!elixir temp.exs
```
The example above seems to confirm that concurrent processing works fine in the Colab environment.
---
The next example, also found online, is a concurrent "hello world".
```
%%writefile temp.exs
parent = self()
spawn_link(fn ->
send parent, {:msg, "hello world"}
end)
receive do
{:msg, contents} -> IO.puts contents
end
!elixir temp.exs
```
The flow of the example above is as follows.
1. The function passed to spawn_link runs its body in a newly spawned process.
2. The new process sends the message "hello world" to the main process (parent).
3. The main process waits for an incoming message (receive) and, when one arrives, prints it to the console.
```
# 実験 とりあえず理解しない。 colab 環境でどうかだけ調べる。
%%writefile chain.exs
defmodule Chain do
def counter(next_pid) do
receive do
n -> send next_pid, n + 1
end
end
def create_processes(n) do
last = Enum.reduce 1..n, self(),
fn (_, send_to) -> spawn(Chain, :counter, [send_to]) end
send last, 0
receive do
final_answer when is_integer(final_answer) ->
"Result is #{inspect(final_answer)}"
end
end
def run(n) do
IO.puts inspect :timer.tc(Chain, :create_processes, [n])
end
end
!elixir --erl "+P 1000000" -r chain.exs -e "Chain.run(1_000_000)"
```
The article at https://ubiteku.oinker.me/2015/12/22/elixir試飲-2-カルチャーショックに戸惑う-並行指向プ/ reports about 7 seconds on a MacBook Pro (3 GHz Intel Core i7, 16 GB RAM), while on Colab it finishes in about 5 seconds!
On my Windows machine (Intel Core i5-9400, 8 GB RAM) the result was:
{3492935, "Result is 1000000"}
Surprisingly fast!
---
Comments start with `#`.
```
%%writefile temp.exs
# コメント実験
str = "helloworld!!!!"
IO.puts str
!elixir temp.exs
```
---
Number bases and integers
```
!elixir -e 'IO.puts 0b1111'
!elixir -e 'IO.puts 0o7777'
!elixir -e 'IO.puts 0xffff'
!elixir -e 'IO.puts 1000_000_00_0'
```
Integers have no fixed upper or lower limit; factorial(10000) is computable. (The original note skips it, but a quick check follows.)
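A quick sketch of that claim (my own check, following the temp.exs pattern used in earlier cells; the module name MyMath is made up), printing only the number of decimal digits so the output stays short:
```
%%writefile temp.exs
defmodule MyMath do
  # naive recursive factorial; Elixir integers have arbitrary precision
  def fact(0), do: 1
  def fact(n) when n > 0, do: n * fact(n - 1)
end

digits = MyMath.fact(10_000) |> Integer.digits() |> length()
IO.puts "factorial(10000) has #{digits} digits"
!elixir temp.exs
```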
---
Problem
How do you convert a decimal number to base $n$? Python has `int()`, `bin()`, `oct()`, and `hex()` for this.
```
# python
print(0b1111)
print(0o7777)
print(0xffff)
print(int('7777',8))
print(bin(15))
print(oct(4095))
print(hex(65535))
!elixir -e 'IO.puts 0b1111'
!elixir -e 'IO.puts 0o7777'
!elixir -e 'IO.puts 0xffff'
!echo
# Integer.to_string() と言う関数を使う
# <> はバイナリー連結
!elixir -e 'IO.puts "0b" <> Integer.to_string(15,2)'
!elixir -e 'IO.puts "0o" <> Integer.to_string(4095,8)'
!elixir -e 'IO.puts "0x" <> Integer.to_string(65535,16)'
```
Floating-point numbers
```
!elixir -e 'IO.puts 1.532e-4'
# .0 とか 1. とかはエラーになる
!elixir -e 'IO.puts 98099098.0809898888'
!elixir -e 'IO.puts 0.00000000000000000000000001' #=> 1.0e-26
!elixir -e 'IO.puts 90000000000000000000000000000000000000000000000000000000'
```
Strings
There does not seem to be a dedicated `string` type.
---
Question: is there a function for checking a value's type, like `type()`?
```
!elixir -e 'IO.puts "日本語が書けますか"'
!elixir -e 'IO.puts "日本語が書けます"'
# 関数に括弧をつけることができる
# \ で escape できる
!elixir -e 'IO.puts (0b1111)'
!elixir -e 'IO.puts ("にほんご\n日本語")'
!elixir -e "IO.puts ('にほんご\n\"日本語\"')"
# 文字連結 `+` ではない!!!!
!elixir -e 'IO.puts("ABCD"<>"EFGH")'
```
The `<>` operator apparently performs binary concatenation.
---
Interpolation
Writing `#{variable}` embeds the variable's value in a string.
```
!elixir -e 'val = 1000; IO.puts "val = #{val}"'
```
---
Booleans
Elixir's boolean literals are true and false (lowercase). Only false and nil are falsy; everything else is truthy.
```
!elixir -e 'if true do IO.puts "true" end'
!elixir -e 'if True do IO.puts "true" end'
!elixir -e 'if False do IO.puts "true" end' # False が大文字なので
!elixir -e 'if false do IO.puts "true" else IO.puts "false" end'
!elixir -e 'if nil do IO.puts "true" else IO.puts "false" end'
!elixir -e 'if 0 do IO.puts "true" else IO.puts "false" end'
!elixir -e 'if (-1) do IO.puts "true" else IO.puts "false" end'
!elixir -e 'if [] do IO.puts "true" else IO.puts "false" end'
!elixir -e 'if "" do IO.puts "true" else IO.puts "false" end'
```
There is no `null`.
---
**The match operator `=`**
In Elixir, `=` is the match operator, not plain assignment. It can bind values and then match against them: when the match succeeds the value of the expression is returned, and when it fails an error is raised.
```
!elixir -e 'IO.puts a = 1'
!elixir -e 'a =1; IO.puts 1 = a'
!elixir -e 'a =1; IO.puts 2 = a'
!elixir -e 'IO.inspect a = [1,2,3]' # リストは puts で表示できないので inspect を使う
!elixir -e '[a,b,c] = [1,2,3]; IO.puts c; IO.puts b'
```
In the example above, whenever Elixir sees the match operator `=` it does its best to make the two sides match; that is why `[a,b,c] = [1,2,3]` binds values to a, b, and c.
```
!elixir -e 'IO.inspect [1,2,[3,4,5]]'
!elixir -e '[a,b,c] = [1,2,[3,4,5]]; IO.inspect c; IO.inspect b'
# 実験 => エラー
!elixir -e 'IO.insepct [a,b] = [1,2,3]'
# 実験
!elixir -e 'IO.inspect a = [[1,2,3]]'
!elixir -e 'IO.inspect [a] = [[1,2,3]]'
!elixir -e '[a] = [[1,2,3]]; IO.inspect a'
# 実験 => エラー
!elixir -e 'IO.insepct [a,b] = [a,b]'
# 実験 アトムについては後述
!elixir -e 'IO.puts a = :a'
!elixir -e 'a = :a; IO.inspect a = a'
!elixir -e 'a = :a; IO.puts a = a'
!elixir -e 'IO.puts :b'
```
The underscore `_` ignores a value; it is a wildcard.
It matches anything.
```
!elixir -e 'IO.inspect [1,_,_]=[1,2,3]'
!elixir -e 'IO.inspect [1,_,_]=[1,"cat","dog"]'
```
Variables supposedly cannot be changed once they are bound.
Or so I thought; rebinding actually works:
```
!elixir -e 'a = 1; IO.puts a = 2'
```
There is a pin operator (`^`, caret) that refers to the variable's existing value.
```
!elixir -e 'a = 1; IO.puts ^a = 2'
```
Note $\quad$ One might think it would have been simpler to forbid rebinding altogether, as ordinary functional languages do. Is there something like a `const` declaration that makes a variable immutable?
Lists themselves are immutable, which is reassuring (a quick check follows).
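A quick check of that point (my own one-liner): List.delete_at/2 returns a new list and the original is left unchanged.
```
!elixir -e 'list = [1, 2, 3]; IO.inspect List.delete_at(list, 0); IO.inspect list'
```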
```
# 大文字にする capitalize
!elixir -e 'IO.puts name = String.capitalize "elixir"'
# 大文字にする upcase
!elixir -e 'IO.puts String.upcase "elixir"'
```
# Atoms
An atom is a constant whose name is its own value.
**Prefixing a name with a colon `:` makes it an atom.**
An atom name may contain UTF-8 characters (including symbols), digits, underscores `_`, and `@`; `!` and `?` are allowed only as the final character.
:fred $\quad$ :is_binary? $\quad$ :var@2 $\quad$ :<> $\quad$ :===
:"func/3" $\quad$ :"long john silver" $\quad$ :эликсир
:mötley_crüe
Notes
```
# 実験 アトムは宣言しないで突然使える
!elixir -e 'IO.puts :fred'
# 実験
!elixir -e 'IO.puts true === :true'
!elixir -e 'IO.puts :true'
!elixir -e 'IO.puts false === :false'
# 実験
!elixir -e 'IO.puts :fred'
!elixir -e 'IO.puts :is_binary?'
!elixir -e 'IO.puts :var@2'
!elixir -e 'IO.puts :<>'
!elixir -e 'IO.puts :==='
# セミコロンを含むアトムは iex 上では使えるが、シェルワンライナーでは使えない
# unexpected token: "" と言うエラーになる
# colab の環境だけでなく、通常のシェルでも同じ
# ファイルにしたプログラムでは使えるので問題ない
# !elixir -e 'IO.puts :"func/3"'
# !elixir -e 'IO.puts :"long john silver"'
!elixir -e 'IO.puts :эликсир'
!elixir -e 'IO.puts :mötley_crüe'
!elixir -e 'IO.puts :日本語はどうか'
```
Operators
```
!elixir -e 'IO.puts 1 + 2'
!elixir -e 'x = 10; IO.puts x + 1'
!elixir -e 'IO.puts 1 - 2'
!elixir -e 'x = 10; IO.puts x - 1'
!elixir -e 'IO.puts 5 * 2'
!elixir -e 'x = 10; IO.puts x * 4'
!echo
!elixir -e 'IO.puts 5 / 2'
!elixir -e 'x = 10; IO.puts x / 3'
# 浮動少数ではなく整数としての結果がほしい場合は div 関数を使用
!elixir -e 'IO.puts div(10,5)'
!elixir -e 'IO.puts div(10,4)'
# 割り算の余り、剰余を求める場合は rem関数を使用
!elixir -e 'IO.puts rem(10,4)'
!elixir -e 'IO.puts rem(10,3)'
!elixir -e 'IO.puts rem(10,2)'
# 比較演算子
!elixir -e 'IO.puts 1 == 1'
!elixir -e 'IO.puts 1 != 1'
!elixir -e 'IO.puts ! (1 != 1)'
!echo
!elixir -e 'IO.puts 20.0 == 20'
!elixir -e 'IO.puts 20.0 === 20'
!elixir -e 'IO.puts 20.0 !== 20'
# 論理演算子
# 論理和
!elixir -e 'IO.puts "ABC" == "ABC" || 20 == 30'
!elixir -e 'IO.puts "ABC" == "abc" || 20 == 30'
!echo
# 論理積
!elixir -e 'IO.puts "ABC" == "ABC" && 20 == 20'
!elixir -e 'IO.puts "ABC" == "ABC" && 20 == 30'
!elixir -e 'IO.puts "ABC" == "def" && 10 > 100'
!echo
# 否定
!elixir -e 'IO.puts !("ABC" == "ABC")'
!elixir -e 'IO.puts !("ABC" == "DEF")'
```
Ranges
Note $\quad$ A range is not a primitive type but a struct.
It is written `start..end`, so is `1..10` by itself already a range? (A quick check follows.)
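A quick check (my own one-liners): inspecting a range literal directly, and again with structs: false as this notebook does later for dates, shows that it is an ordinary Range struct underneath.
```
!elixir -e 'IO.inspect 1..10'
!elixir -e 'IO.inspect 1..10, structs: false'
```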
```
!elixir -e 'IO.inspect Enum.to_list(1..3)'
!elixir -e 'IO.inspect Enum.to_list(0..10//3)'
!elixir -e 'IO.inspect Enum.to_list(0..10//-3)'
!elixir -e 'IO.inspect Enum.to_list(10..0//-3)'
!elixir -e 'IO.inspect Enum.to_list(1..1)'
!elixir -e 'IO.inspect Enum.to_list(1..-1)'
!elixir -e 'IO.inspect Enum.to_list(1..1//2)'
!elixir -e 'IO.inspect Enum.to_list(1..-1//2)'
!elixir -e 'IO.inspect Enum.to_list(1..-1//-2)'
!elixir -e 'IO.inspect 1..9//2'
```
Regular expressions
A regular expression is also a struct, not a primitive type.
```
!elixir -e 'IO.inspect Regex.run ~r{[aiueo]},"catapillar"'
!elixir -e 'IO.inspect Regex.scan ~r{[aiueo]},"catapillar"'
!elixir -e 'IO.inspect Regex.split ~r{[aiueo]},"catapillar"'
!elixir -e 'IO.inspect Regex.replace ~r{[aiueo]},"catapillar", "*"'
```
# Collection types
## Tuples
Tuples are defined with braces.
Like every Elixir collection, tuples do not restrict the types of their elements.
They usually hold 2 to 4 elements; beyond that, consider a map or a struct.
Tuples are convenient as function return values.
They are typically consumed with pattern matching.
---
cf. other uses of braces besides tuples:
* string interpolation `#{variable}`
* regular expressions `~r{}`
* maps `%{}`
```
!elixir -e 'IO.inspect {3.14, :pie, "Apple"}'
!elixir -e '{status, count, action} = {3.14, :pie, "next"}; IO.puts action'
# 実験
# タプルの使い方の例
!echo hello > temp.txt
!elixir -e '{status, file} = File.open("temp.txt"); IO.inspect {status, file}'
!elixir -e '{status, file} = File.read("temp.txt"); IO.inspect {status, file}'
!elixir -e '{status, file} = File.read("temp02.txt"); IO.inspect {status, file}'
!elixir -e '{status, file} = File.write("temp.txt", "goodbye"); IO.inspect {status, file}'
!elixir -e '{status, file} = File.read("temp.txt"); IO.inspect {status, file}'
# 実験 タプルに ++ は使えるか。 => 使えない <> も使えない
# !elixir -e 'IO.inspect {3.14, :pie, "Apple"} ++ {3}'
# 実験 タプルに head は使えるか。 => 使えない
# !elixir -e 'IO.inspect hd {3.14, :pie, "Apple"}'
# 実験 タプルにパターンマッチングは使えるか。 => 使える
!elixir -e '{a,b,c} = {3.14, :pie, "Apple"}; IO.inspect [c,a,b]'
# 実験
# 項目の入れ替え
!elixir -e 'a=1; b=3; {b,a}={a,b}; IO.inspect {a,b}'
!elixir -e 'a=1; b=3; c=5; d= 7; {d,c,b,a}={a,b,c,d}; IO.inspect {a,b,c,d}'
# 実験
# タプルの要素にタプルはあるか
!elixir -e 'IO.inspect {3.14, :pie, "Apple", {3}}'
```
## Lists
Note that Elixir lists are not like arrays in other languages; they are closer in concept to Lisp lists.
A non-empty list has a head (hd) and a tail (tl): hd is the first element and tl is everything after it.
```
# リスト
!elixir -e 'IO.inspect [3.14, :pie, "Apple"]'
!elixir -e 'IO.inspect hd [3.14]'
!elixir -e 'IO.inspect tl [3.14]'
# リスト先頭への追加(高速)
!elixir -e 'IO.inspect ["π" | [3.14, :pie, "Apple"]]'
# リスト末尾への追加(低速)
!elixir -e 'IO.inspect [3.14, :pie, "Apple"] ++ ["Cherry"]'
```
The cells above and below concatenate lists using the `++/2` operator. In the notation `++/2`, `++` is the operator itself and `/2` is its arity (the number of arguments).
---
Question $\quad$ What exactly is arity? (As just noted: the number of arguments a function or operator takes.)
---
Question $\quad$ Why is list concatenation `++` while string concatenation is `<>`? Is there operator overloading? Isn't a string a list? Are the length functions separate too? (A quick check follows.)
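A quick check on that question (my own one-liners): double-quoted strings are binaries and single-quoted charlists are lists, which is why the concatenation operators and the length functions differ.
```
!elixir -e 'IO.puts is_binary("hello"); IO.puts is_list("hello")'
!elixir -e "IO.puts is_list('hello'); IO.puts is_binary('hello')"
!elixir -e 'IO.puts String.length("hello")'
!elixir -e "IO.puts length('hello')"
```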
```
# リストの連結
!elixir -e 'IO.inspect [1, 2] ++ [3, 4, 1]'
# リストの減算
# --/2 演算子は存在しない値を引いてしまってもオッケー
!elixir -e 'IO.inspect ["foo", :bar, 42] -- [42, "bar"]'
# 重複した値の場合、右辺の要素のそれぞれに対し、左辺の要素のうち初めて登場した同じ値が順次削除
!elixir -e 'IO.inspect [1,2,2,3,2,3] -- [1,2,3,2]'
# リストの減算の値のマッチには strict comparison が使われている
!elixir -e 'IO.inspect [2] -- [2.0]'
!elixir -e 'IO.inspect [2.0] -- [2.0]'
# head /tail
!elixir -e 'IO.inspect hd [3.14, :pie, "Apple"]'
!elixir -e 'IO.inspect tl [3.14, :pie, "Apple"]'
```
---
To split a list into head and tail you can also use
* pattern matching
* the cons operator (`|`)
as shown below.
```
!elixir -e '[head | tail] = [3.14, :pie, "Apple"]; IO.inspect head; IO.inspect tail'
```
## Keyword lists
Keyword lists and maps are Elixir's associative collections.
A keyword list is a special list of two-element tuples whose first element is an atom; it has the same performance characteristics as a list.
```
# キーワードリスト
!elixir -e 'IO.inspect [foo: "bar", hello: "world"]'
# タプルのリストとしても同じ
!elixir -e 'IO.inspect [{:foo, "bar"}, {:hello, "world"}]'
!elixir -e 'IO.inspect [foo: "bar", hello: "world"] == [{:foo, "bar"}, {:hello, "world"}]'
```
Three characteristics of keyword lists:
* keys are atoms
* keys are ordered
* key uniqueness is not guaranteed
For these reasons, keyword lists are most often used to pass options to functions (see the sketch after this list).
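A small illustration of that use (my own example; trim: is a documented option of String.split/3): the options arrive as a trailing keyword list, with or without the surrounding brackets.
```
# trim: true drops the empty strings produced by consecutive separators
!elixir -e 'IO.inspect String.split("a,,b,c", ",", trim: true)'
# the same call with the keyword list written out explicitly
!elixir -e 'IO.inspect String.split("a,,b,c", ",", [trim: true])'
```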
```
# 実験 リストの角括弧は省略できる
!elixir -e 'IO.inspect foo: "bar", hello: "world"'
# 実験
!elixir -e 'IO.inspect [1, fred: 1, dave: 2]'
!elixir -e 'IO.inspect {1, fred: 1, dave: 2}'
!elixir -e 'IO.inspect {1, [{:fred,1},{:dave, 2}]}'
```
## Maps
* Unlike keyword lists, keys can be of any type.
* Keys are not ordered.
* Key uniqueness is guaranteed; adding a duplicate key replaces the previous value.
* Variables can be used as map keys.
* Maps are defined with the `%{}` syntax.
```
!elixir -e 'IO.inspect %{:foo => "bar", "hello" => :world}'
!elixir -e 'map = %{:foo => "bar", "hello" => :world}; IO.inspect map[:foo]'
!elixir -e 'map = %{:foo => "bar", "hello" => :world}; IO.inspect map["hello"]'
!echo
!elixir -e 'key = "hello"; IO.inspect %{key => "world"}'
!echo
!elixir -e 'IO.inspect %{:foo => "bar", :foo => "hello world"}'
```
Maps whose keys are all atoms have a special syntax.
```
!elixir -e 'IO.inspect %{foo: "bar", hello: "world"} == %{:foo => "bar", :hello => "world"}'
# 加えて、アトムのキーにアクセスするための特別な構文がある。
!elixir -e 'map = %{foo: "bar", hello: "world"}; IO.inspect map.hello'
!elixir -e 'map = %{foo: "bar", hello: "world"}; IO.inspect map[:hello]'
!elixir -e 'map = %{:foo => "bar", :hello => "world"}; IO.inspect map[:hello]'
```
---
Question: are the special map syntaxes
1. a colon `:` instead of `=>`
2. a dot `.` instead of `[]` for reading a value
really necessary? Perhaps they are not strictly needed but simply read better. Which form is normally used? It feels like this just complicates the syntax.
Most likely the influence is Python dicts using `:`, and Ruby, where `=>` exists but the `:` sugar has become the dominant style, so appearance won out. If atom keys are the norm it may well improve productivity: the leading colon on the key disappears, `:` is shorter than the fat arrow, and dot access mirrors it. So in practice this syntax is presumably the default. (One behavioral difference worth noting is sketched below.)
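One practical difference between the two access forms (my own check): map[:key] returns nil for a missing key, while map.key raises a KeyError.
```
!elixir -e 'map = %{foo: "bar"}; IO.inspect map[:missing]'
# the dot form raises KeyError on a missing key, so it is left commented out
# !elixir -e 'map = %{foo: "bar"}; IO.inspect map.missing'
```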
```
# マップの更新のための構文がある (新しい map が作成される)
# この構文は、マップに既に存在するキーを更新する場合にのみ機能する
!elixir -e 'map = %{foo: "bar", hello: "world"}; IO.inspect %{map | foo: "baz"}'
# 新しいキーを作成するには、`Map.put/3` を使用
!elixir -e 'map = %{hello: "world"}; IO.inspect Map.put(map, :foo, "baz")'
```
---
Question: binaries are still unclear to me; to be revisited separately.
# Binaries
```
# binaries
!elixir -e 'IO.inspect <<1,2>>'
!elixir -e 'IO.inspect <<1,10>>'
!elixir -e 'bin = <<1,10>>; IO.inspect byte_size bin'
!elixir -e 'bin = <<3::size(2),5::size(4),1::size(2)>>; IO.inspect bin'
!elixir -e 'IO.puts Integer.to_string(213,2)'
!elixir -e 'IO.puts 0b11'
!elixir -e 'IO.puts 0b0101'
!echo
!elixir -e 'bin = <<3::size(2),5::size(4),1::size(2)>>; IO.inspect byte_size bin'
!elixir -e 'bin = <<3::size(2),5::size(4),1::size(2)>>; IO.inspect :io.format("~-8.2b~n",:binary.bin_to_list(bin))'
!elixir -e 'IO.inspect <<1,2>> <> <<3>>'
```
----
**Date and Time**
```
# Date and Time
!elixir -e 'IO.inspect Date.new(2021,6,2)'
!elixir -e '{:ok, d1}=Date.new(2021,6,2); IO.inspect d1'
!elixir -e '{:ok, d1}=Date.new(2021,6,2); IO.inspect Date.day_of_week(d1)'
!elixir -e '{:ok, d1}=Date.new(2021,6,2); IO.inspect Date.add(d1,7)'
!elixir -e '{:ok, d1}=Date.new(2021,6,2); IO.inspect d1, structs: false'
```
`~D[...]` and `~T[...]` are Elixir sigils; they are explained in the strings and binaries section.
# Getting help
Note $\quad$ How to look up functions
Using the helpers: help, type, info, and so on.
As the code cell below shows, you can list a module's functions and then read the help for a specific one to get fairly detailed information.
The lines are commented out because the output is large; uncomment them to see it.
Concretely: put the module name where Enum appears to list its functions, copy the output (Ctrl+A, Ctrl+C) into an editor such as VS Code, then put the function you want to inspect where Enum.all?/1 appears and read its help the same way.
```
# !elixir -e 'Enum.__info__(:functions) |> Enum.each(fn({function, arity}) -> IO.puts "#{function}/#{arity}" end)'
# !elixir -e 'require IEx.Helpers;IEx.Helpers.h Enum.all?/1'
# h 単独のドキュメントを見たい
# !elixir -e 'require IEx.Helpers;IEx.Helpers.h'
# i というのもある
# !elixir -e 'x = [3,2]; require IEx.Helpers;IEx.Helpers.i x'
# !elixir -e 'require IEx.Helpers;IEx.Helpers.h IO'
```
# The Enum module
Enum is a set of algorithms for enumerating over collections such as lists.
* all?, any?
* chunk_every, chunk_by, map_every
* each
* map, filter, reduce
* min, max
* sort, uniq, uniq_by
* the capture operator `&`
```
# all? 関数を引数で受け取り、リストの全体が true の時、true を返す
!elixir -e 'IO.puts Enum.all?(["foo", "bar", "hello"], fn(s) -> String.length(s) == 3 end)'
!elixir -e 'IO.puts Enum.all?(["foo", "bar", "hello"], fn(s) -> String.length(s) >1 end)'
# any? 少なくとも1つの要素が true と評価された場合に true を返す
!elixir -e 'IO.puts Enum.any?(["foo", "bar", "hello"], fn(s) -> String.length(s) == 5 end)'
# chunk_every splits a list into smaller groups (Enum.chunk/2 is deprecated; chunk_every/2 also keeps the final partial chunk)
!elixir -e 'IO.inspect Enum.chunk_every([1, 2, 3, 4, 5, 6], 2)'
!elixir -e 'IO.inspect Enum.chunk_every([1, 2, 3, 4, 5, 6], 3)'
!elixir -e 'IO.inspect Enum.chunk_every([1, 2, 3, 4, 5, 6], 4)'
# chunk_by 関数の戻り値が変化することによって分割する
!elixir -e 'IO.inspect Enum.chunk_by(["one", "two", "three", "four", "five"], fn(x) -> String.length(x) end)'
!elixir -e 'IO.inspect Enum.chunk_by(["one", "two", "three", "four", "five", "six"], fn(x) -> String.length(x) end)'
# map_every nth ごとに map 処理する
!elixir -e 'IO.inspect Enum.map_every(1..10, 3, fn x -> x + 1000 end)'
!elixir -e 'IO.inspect Enum.map_every(1..10, 1, fn x -> x + 1000 end)'
!elixir -e 'IO.inspect Enum.map_every(1..10, 0, fn x -> x + 1000 end)'
# each 新しい値を生成することなく反復する。返り値は:ok というアトム。
!elixir -e 'IO.inspect Enum.each(["one", "two", "three"], fn(s) -> IO.puts(s) end)'
!elixir -e 'IO.puts Enum.each(["one", "two", "three"], fn(s) -> IO.puts(s) end)'
# map 関数を各要素に適用して新しいリストを生み出す
!elixir -e 'IO.inspect Enum.map([0, 1, 2, 3], fn(x) -> x - 1 end)'
# min 最小の値を探す。 リストが空の場合エラーになる
# リストが空だったときのために予め最小値を生成する関数を渡すことができる
!elixir -e 'IO.inspect Enum.min([5, 3, 0, -1])'
!elixir -e 'IO.inspect Enum.min([], fn -> :foo end)'
# max 最大の(max/1)値を返す
!elixir -e 'IO.inspect Enum.max([5, 3, 0, -1])'
!elixir -e 'IO.inspect Enum.max([], fn -> :bar end)'
# filter 与えられた関数によって true と評価された要素だけを得る
!elixir -e 'IO.inspect Enum.filter([1, 2, 3, 4], fn(x) -> rem(x, 2) == 0 end)'
!elixir -e 'IO.inspect Enum.filter([], fn(x) -> rem(x, 2) == 0 end)'
# reduce リストを関数に従って単一の値へ抽出する。 accumulator を指定できる。
# accumulator が与えられない場合は最初の要素が用いられる。
!elixir -e 'IO.inspect Enum.reduce([1, 2, 3], 10, fn(x, acc) -> x + acc end)'
!elixir -e 'IO.inspect Enum.reduce([1, 2, 3], fn(x, acc) -> x + acc end)'
!elixir -e 'IO.inspect Enum.reduce(["a","b","c"], "1", fn(x,acc)-> x <> acc end)'
# sort `sort/1` はソートの順序に Erlangの Term 優先順位 を使う
!elixir -e 'IO.inspect Enum.sort([5, 6, 1, 3, -1, 4])'
!elixir -e 'IO.inspect Enum.sort([:foo, "bar", Enum, -1, 4])'
# `sort/2` は、順序を決める為の関数を渡すことができる
!elixir -e 'IO.inspect Enum.sort([%{:val => 4}, %{:val => 1}], fn(x, y) -> x[:val] > y[:val] end)'
# なしの場合
!elixir -e 'IO.inspect Enum.sort([%{:count => 4}, %{:count => 1}])'
# sort/2 に :asc または :desc をソート関数として渡すことができる
!elixir -e 'IO.inspect Enum.sort([2, 3, 1], :desc)'
# uniq 重複した要素を取り除く
!elixir -e 'IO.inspect Enum.uniq([1, 2, 3, 2, 1, 1, 1, 1, 1])'
#=> [1, 2, 3]
# uniq_by 重複した要素を削除するが、ユニークかどうか比較を行う関数を渡せる
!elixir -e 'IO.inspect Enum.uniq_by([%{x: 1, y: 1}, %{x: 2, y: 1}, %{x: 3, y: 3}], fn coord -> coord.y end)'
```
# Enum and anonymous functions with the capture operator `&`
Many functions in Elixir's Enum module take an anonymous function as an argument.
These anonymous functions are often written in shorthand with the capture operator `&`.
```
# 無名関数でのキャプチャ演算子の使用
!elixir -e 'IO.inspect Enum.map([1,2,3], fn number -> number + 3 end)'
!elixir -e 'IO.inspect Enum.map([1,2,3], &(&1 + 3))'
!elixir -e 'plus_three = &(&1 + 3);IO.inspect Enum.map([1,2,3], plus_three)'
# Enum.all? でもキャプチャ演算子が使えるか
# all? 関数を引数で受け取り、リストの全体が true の時、true を返す
# !elixir -e 'IO.puts Enum.all?(["foo", "bar", "hello"], fn(s) -> String.length(s) == 3 end)'
!elixir -e 'IO.puts Enum.all?(["foo", "bar", "hello"], &(String.length(&1)==3))'
# !elixir -e 'IO.puts Enum.all?(["foo", "bar", "hello"], fn(s) -> String.length(s) >1 end)'
!elixir -e 'IO.puts Enum.all?(["foo", "bar", "hello"], &(String.length(&1)>1))'
```
---
# Pattern matching
Pattern matching lets you match on values, data structures, and functions.
* the match operator
* the pin operator
```
# マッチ演算子 `=` はマッチ演算子である。 マッチ演算子を通して値を代入し、
# その後、マッチさせることができる。マッチすると、方程式の結果が返され、
# 失敗すると、エラーになる
!elixir -e 'IO.puts x = 1'
!elixir -e 'x = 1;IO.puts 1 = x'
# !elixir -e 'x = 1;IO.puts 2 = x'
#=> (MatchError) no match of right hand side value: 1
# リストでのマッチ演算子
!elixir -e 'IO.inspect list = [1, 2, 3]'
!elixir -e 'list = [1, 2, 3]; IO.inspect [1, 2, 3] = list'
# !elixir -e 'list = [1, 2, 3]; IO.inspect [] = list'
#=> (MatchError) no match of right hand side value: [1, 2, 3]
!elixir -e 'list = [1, 2, 3]; IO.inspect [1 | tail] = list'
!elixir -e 'list = [1, 2, 3]; [1 | tail] = list; IO.inspect tail'
# タプルとマッチ演算子
!elixir -e 'IO.inspect {:ok, value} = {:ok, "Successful!"}'
!elixir -e '{:ok, value} = {:ok, "Successful!"}; IO.inspect value'
```
---
**The pin operator**
The match operator performs assignment when the left side contains a variable.
Rebinding a variable is sometimes undesirable; for those situations there is the pin operator `^`.
Pinning a variable matches against its existing value instead of rebinding it.
```
# ピン演算子
!elixir -e 'IO.inspect x = 1'
# !elixir -e 'x = 1; IO.inspect ^x = 2'
#=> ** (MatchError) no match of right hand side value: 2
!elixir -e 'x = 1; IO.inspect {x, ^x} = {2, 1}'
!elixir -e 'x = 1;{x, ^x} = {2, 1}; IO.inspect x'
!echo
!elixir -e 'IO.inspect key = "hello"'
!elixir -e 'key = "hello"; IO.inspect %{^key => value} = %{"hello" => "world"}'
!elixir -e 'key = "hello"; %{^key => value} = %{"hello" => "world"}; IO.inspect value'
!elixir -e 'key = "hello"; %{^key => value} = %{"hello" => "world"}; IO.inspect value'
# 関数の clause でのピン演算子
!elixir -e 'IO.inspect greeting = "Hello"'
!elixir -e 'greeting = "Hello"; IO.inspect greet = fn (^greeting, name) -> "Hi #{name}"; (greeting, name) -> "#{greeting},#{name}" end'
!elixir -e 'greeting = "Hello"; greet = fn (^greeting, name) -> "Hi #{name}"; (greeting, name) -> "#{greeting},#{name}" end; IO.inspect greet.("Hello","Sean")'
!elixir -e 'greeting = "Hello"; greet = fn (^greeting, name) -> "Hi #{name}"; (greeting, name) -> "#{greeting},#{name}" end; IO.inspect greet.("Mornin","Sean")'
```
# Control structures
* if and unless
* case
* cond
* with
if and unless
Elixir's if and unless work like Ruby's.
In Elixir, if and unless are defined as macros.
The implementation can be found in the Kernel module.
Keep in mind that the only values Elixir treats as false are nil and the boolean false.
```
%%writefile temp.exs
IO.puts (
if String.valid?("Hello") do
"Valid string!"
else
"Invalid string."
end)
!elixir temp.exs
%%writefile temp.exs
if "a string value" do
IO.puts "Truthy"
end
!elixir temp.exs
# unless/2 は if/2 の逆で、条件が否定される時だけ作用する
%%writefile temp.exs
unless is_integer("hello") do
IO.puts "Not an Int"
end
!elixir temp.exs
# 実験 シェルワンライナー版 do や end の前後にセミコロンは要らない
!elixir -e 'unless is_integer("hello") do IO.puts "Not an Int" end'
# 複数のパターンにマッチする場合、case/2 を使う
%%writefile temp.exs
IO.puts(
case {:error, "Hello World"} do
{:ok, result} -> result
{:error, _} -> "Uh oh!"
_ -> "Catch all"
end
)
!elixir temp.exs
# アンダースコア _ 変数は case/2 命令文の中に含まれる重要な要素
# これが無いと、マッチするものが見あたらない場合にエラーが発生する
# エラーの例
!elixir -e 'case :even do :odd -> IO.puts "Odd" end'
# アンダースコア _ を"他の全て"にマッチする else と考えること
!elixir -e 'case :even do :odd -> IO.puts "Odd"; _ -> IO.puts "Not odd" end'
# case/2 はパターンマッチングに依存しているため、パターンマッチングと同じルールや制限が全て適用される
# 既存の変数に対してマッチさせようという場合にはピン ^ 演算子を使う
!elixir -e 'pie=3.14; IO.puts(case "cherry pie" do ^pie -> "Not so tasty"; pie -> "I bet #{pie} is tasty" end)'
!elixir -e 'pie=3.14; IO.puts(case "cherry pie" do pie -> "Not so tasty"; pie -> "I bet #{pie} is tasty" end)'
# case/2 はガード節に対応している
# 公式ドキュメントの Expressions allowed in guard clauses を参照
!elixir -e 'IO.puts(case {1, 2, 3} do {1, x, 3} when x > 0 -> "Will match"; _ -> "Wont match" end)'
```
---
What is a guard clause?
See "Expressions allowed in guard clauses" in the official documentation.
```
# cond
!elixir -e 'IO.puts (cond do 2+2==5 -> "This will not be true"; 2*2==3 -> "Nor this"; 1+1 == 2 -> "But this will" end)'
# cond も case と同様マッチしない場合にエラーになるので、true になる条件を定義する
!elixir -e 'IO.puts (cond do 7+1==0 -> "Incorrect"; true -> "Catch all" end)'
# with
# 特殊形式の with/1 はネストされた case/2 文やきれいにパイプできない状況に便利
# with/1 式はキーワード, ジェネレータ, そして式から成り立っている
# ジェネレータについてはリスト内包表記のところで詳しく述べる
# `<-` の右側と左側を比べるのにパターンマッチングが使われる
!elixir -e 'user=%{first: "Sean", last: "Callan"}; IO.inspect user'
!elixir -e 'user=%{first: "Sean", last: "Callan"}; with {:ok, first} <- Map.fetch(user, :first), {:ok, last} <- Map.fetch(user, :last), do: IO.puts last <> ", " <> first'
# シェルワンライナーが長いのでファイルにする
%%writefile temp.exs
user=%{first: "Sean", last: "Callan"}
with {:ok, first} <- Map.fetch(user, :first),
{:ok, last} <- Map.fetch(user, :last),
do: IO.puts last <> ", " <> first
!elixir temp.exs
# 式がマッチに失敗した場合
# Map.fetch が失敗して :error を返し、first が設定されずプログラムが止まる
%%writefile temp.exs
user = %{first: "doomspork"}
with {:ok, first} <- Map.fetch(user, :first),
{:ok, last} <- Map.fetch(user, :last),
do: IO.puts last <> ", " <> first
!elixir temp.exs
# with/1 で else が使える
%%writefile temp.exs
import Integer
m = %{a: 1, c: 3}
a =
with {:ok, number} <- Map.fetch(m, :a),
true <- is_even(number) do
IO.puts "#{number} divided by 2 is #{div(number, 2)}"
:even
else
:error ->
IO.puts("We don't have this item in map")
:error
_ ->
IO.puts("It is odd")
:odd
end
IO.inspect a
!elixir temp.exs
```
# Functions
```
# 関数型言語では、関数は第一級オブジェクト first class object である
# ここでは無名関数、名前付き関数、アリティ、パターンマッチング、プライベート関数、ガード、デフォルト引数について学ぶ
# 無名関数 anonymous function
# fn end のキーワードを用い、 引数 `->` 関数定義 の形で定義する
%%writefile temp.exs
sum = fn (a, b) -> a + b end
IO.puts sum.(2, 3)
!elixir temp.exs
# シェルワンライナーで書いてみる
!elixir -e 'sum=fn(a,b)->a+b end;IO.puts sum.(2,3)'
# elixir では通常関数定義に省略記号 & を使う (キャプチャ演算子)
!elixir -e 'sum = &(&1 + &2); IO.puts sum.(2, 3)'
```
---
Question: how do you call an anonymous function with arguments and get its result?
`&(&1 + &2).(2, 3)` did not work at first; with extra parentheses it does:
!elixir -e 'IO.puts ((&(&1 + &2)).(2,3))'
```
!elixir -e 'IO.puts ((fn (a,b) -> a + b end).(2,3))'
!elixir -e 'IO.puts ((&(&1 + &2)).(2,3))'
# 関数定義にパターンマッチングが使える
%%writefile temp.exs
handle_result = fn
{:ok, _result} -> IO.puts "Handling result..."
{:ok, _} -> IO.puts "This would be never run as previous will be matched beforehand."
{:error} -> IO.puts "An error has occurred!"
end
some_result = 1
handle_result.({:ok, some_result}) #=> Handling result...
handle_result.({:error}) #=> An error has occured!
!elixir temp.exs
# 名前付き関数
# 名前付き関数はモジュール内部で def キーワードを用いて定義する
%%writefile temp.exs
defmodule Greeter do
def hello(name) do
"Hello, " <> name
end
end
IO.puts Greeter.hello("Sean")
!elixir temp.exs
# 次のような書き方もできる do: を使う
%%writefile temp.exs
defmodule Greeter do
def hello(name), do: "Hello, " <> name
end
IO.puts Greeter.hello("Sean")
!elixir temp.exs
# 実験 シェルワンライナーで出来るか
!elixir -e 'defmodule Greeter do def hello(name) do "Hello, " <> name end end;IO.puts Greeter.hello("Sean")'
# 実験 シェルワンライナーで `, do:` 構文が使えるか
!elixir -e 'defmodule Greeter do def hello(name),do: "Hello, " <> name end;IO.puts Greeter.hello("Sean")'
# 再帰
%%writefile temp.exs
defmodule Length do
def of([]), do: 0
def of([_ | tail]), do: 1 + of(tail)
end
IO.puts Length.of []
IO.puts Length.of [1, 2, 3]
!elixir temp.exs
# アリティとは関数の引数の数
# 引数の数が違えば別の関数
%%writefile temp.exs
defmodule Greeter2 do
def hello(), do: "Hello, anonymous person!" # hello/0
def hello(name), do: "Hello, " <> name # hello/1
def hello(name1, name2), do: "Hello, #{name1} and #{name2}" # hello/2
end
IO.puts Greeter2.hello()
IO.puts Greeter2.hello("Fred")
IO.puts Greeter2.hello("Fred", "Jane")
!elixir temp.exs
# 関数とパターンマッチング
%%writefile temp.exs
defmodule Greeter1 do
def hello(%{name: person_name}) do
IO.puts "Hello, " <> person_name
end
end
fred = %{
name: "Fred",
age: "95",
favorite_color: "Taupe"
}
IO.puts Greeter1.hello(fred) #=> Hello, fred になる
#IO.puts Greeter1.hello(%{age: "95", favorite_color: "Taupe"}) #=> (FunctionClauseError) no function clause matching in Greeter1.hello/1
!elixir temp.exs
# Fredの名前を person_name にアサインしたいが、人物マップ全体の値も保持したいという場合
# マップを引数にすれば、別々の変数に格納することができる
%%writefile temp.exs
defmodule Greeter2 do
def hello(%{name: person_name} = person) do
IO.puts "Hello, " <> person_name
IO.inspect person
end
end
fred = %{
name: "Fred",
age: "95",
favorite_color: "Taupe"
}
Greeter2.hello(fred)
IO.puts("")
Greeter2.hello(%{name: "Fred"})
IO.puts("")
# Greeter2.hello(%{age: "95", favorite_color: "Taupe"}) #=> (FunctionClauseError) no function clause matching in Greeter2.hello/1
!elixir temp.exs
```
```
# %{name: person_name} と person の順序を入れ替えても、それぞれがfredとマッチングするので同じ結果となる
# 変数とマップを入れ替えてみる
# それぞれがパターンマッチしているので結果は同じになる
%%writefile temp.exs
defmodule Greeter3 do
def hello(person = %{name: person_name}) do
IO.puts "Hello, " <> person_name
IO.inspect person
end
end
fred = %{
name: "Fred",
age: "95",
favorite_color: "Taupe"
}
Greeter3.hello(fred)
IO.puts("")
Greeter3.hello(%{name: "Fred"})
!elixir temp.exs
# プライベート関数
# プライベート関数は defp を用いて定義する
# そのモジュール自身の内部からのみ呼び出すことが出来る
%%writefile temp.exs
defmodule Greeter do
def hello(name), do: phrase() <> name
defp phrase, do: "Hello, "
end
IO.puts Greeter.hello("Sean") #=> "Hello, Sean"
# IO.puts Greeter.phrase #=> (UndefinedFunctionError) function Greeter.phrase/0 is undefined or private
!elixir temp.exs
# ガード
%%writefile temp.exs
defmodule Greeter do
def hello(names) when is_list(names) do
names
|> Enum.join(", ")
|> hello
end
def hello(name) when is_binary(name) do
phrase() <> name
end
defp phrase, do: "Hello, "
end
IO.puts Greeter.hello ["Sean", "Steve"]
IO.puts Greeter.hello "Bill"
!elixir temp.exs
```
---
Question: are Elixir guards the same as Haskell guards?
```
# デフォルト引数
# デフォルト値が欲しい場合、引数 \\ デフォルト値の記法を用いる
%%writefile temp.exs
defmodule Greeter do
def hello(name, language_code \\ "en") do
phrase(language_code) <> name
end
defp phrase("en"), do: "Hello, "
defp phrase("es"), do: "Hola, "
end
IO.puts Greeter.hello("Sean", "en")
IO.puts Greeter.hello("Sean")
IO.puts Greeter.hello("Sean", "es")
!elixir temp.exs
# ガードとデフォルト引数を組み合わせる場合
# 混乱を避けるためデフォルト引数を処理する定義を先に置く
%%writefile temp.exs
defmodule Greeter do
def hello(names, language_code \\ "en")
def hello(names, language_code) when is_list(names) do
names
|> Enum.join(", ")
|> hello(language_code)
end
def hello(name, language_code) when is_binary(name) do
phrase(language_code) <> name
end
defp phrase("en"), do: "Hello, "
defp phrase("es"), do: "Hola, "
end
IO.puts Greeter.hello ["Sean", "Steve"] #=> "Hello, Sean, Steve"
IO.puts Greeter.hello ["Sean", "Steve"], "es" #=> "Hola, Sean, Steve"
IO.puts Greeter.hello "Bob", "es"
!elixir temp.exs
# パイプライン演算子
# パイプライン演算子 `|>` はある式の結果を別の式に渡す
# 関数のネストを理解しやすくするためのもの
# 文字列をトークン化する、単語に分ける
!elixir -e 'IO.inspect "Elixir rocks" |> String.split()'
!elixir -e 'IO.inspect "Elixir rocks" |> String.upcase() |> String.split()'
# パイプラインを使う場合に関数の括弧は省略せずには入れた方がわかりやすい
!elixir -e 'IO.inspect "elixir" |> String.ends_with?("ixir")'
```
# Modules
---
Question: so far every call has carried its module name, as in IO.puts; is that the normal style in Elixir?
Also, a new module was defined every time a function was written; does that add the function to an existing module, or is each one separate?
```
# モジュールの基本的な例
%%writefile temp.exs
defmodule Example do
def greeting(name) do
"Hello #{name}."
end
end
IO.puts Example.greeting "Sean"
!elixir temp.exs
# モジュールはネストする事ができる
%%writefile temp.exs
defmodule Example.Greetings do
def morning(name) do
"Good morning #{name}."
end
def evening(name) do
"Good night #{name}."
end
end
IO.puts Example.Greetings.morning "Sean"
!elixir temp.exs
# モジュールの属性
# モジュール属性は Elixir では一般に定数として用いられる
# Elixirには予約されている属性がある
# moduledoc — 現在のモジュールにドキュメントを付ける
# doc — 関数やマクロについてのドキュメント管理
# behaviour — OTPまたはユーザが定義した振る舞い(ビヘイビア)に用いる
%%writefile temp.exs
defmodule Example do
@greeting "Hello"
def greeting(name) do
~s(#{@greeting} #{name}.)
end
end
IO.puts Example.greeting "tak"
!elixir temp.exs
# 構造体 struct
# 構造体は定義済みのキーの一群とデフォルト値を持つマップである
# 定義するには defstruct を用いる
%%writefile temp.exs
defmodule Example.User do
defstruct name: "Sean", roles: []
end
defmodule Main do
IO.inspect %Example.User{}
IO.inspect %Example.User{name: "Steve"}
IO.inspect %Example.User{name: "Steve", roles: [:manager]}
end
!elixir temp.exs
# 構造体の更新
%%writefile temp.exs
defmodule Example.User do
defstruct name: "Sean", roles: []
end
defmodule Main do
steve = %Example.User{name: "Steve"}
IO.inspect %{steve | name: "Sean"}
IO.inspect steve
end
!elixir temp.exs
# 構造体の更新とマッチング
%%writefile temp.exs
defmodule Example.User do
defstruct name: "Sean", roles: []
end
defmodule Main do
steve = %Example.User{name: "Steve"}
sean = %{steve | name: "Sean"}
IO.inspect %{name: "Sean"} = sean
end
!elixir temp.exs
# inspect の出力を変える
%%writefile temp.exs
defmodule Example.User do
# @derive {Inspect, only: [:name]}
@derive {Inspect, except: [:roles]}
defstruct name: "Sean", roles: []
end
defmodule Main do
steve = %Example.User{name: "Steve"}
sean = %{steve | name: "Sean"}
IO.inspect %{name: "Sean"} = sean
end
!elixir temp.exs
# コンポジション(Composition)
# コンポジションを用いてモジュールや構造体に既存の機能を追加する
# alias モジュール名をエイリアスする
%%writefile temp.exs
defmodule Sayings.Greetings do
def basic(name), do: "Hi, #{name}"
end
defmodule Example do
alias Sayings.Greetings
def greeting(name), do: Greetings.basic(name)
end
IO.puts Example.greeting "Bob!!"
# aliasを使わない場合
# defmodule Example do
# def greeting(name), do: Sayings.Greetings.basic(name)
# end
!elixir temp.exs
# 別名で alias したい時は `:as` を使う
%%writefile temp.exs
defmodule Sayings.Greetings do
def basic(name), do: "Hi, #{name}"
end
defmodule Example do
alias Sayings.Greetings, as: Hi
def print_message(name), do: Hi.basic(name)
end
IO.puts Example.print_message "Chris!!"
!elixir temp.exs
# 複数のモジュールを一度に alias する
# defmodule Example do
# alias Sayings.{Greetings, Farewells}
# end
# import
# 関数を取り込みたいという場合には、 import を使う
!elixir -e 'import List; IO.inspect last([1,2,3])'
# フィルタリング
# import のデフォルトでは全ての関数とマクロが取り込まれるが、 :only や :except でフィルタすることができる
# アリティを付ける必要がある
%%writefile temp.exs
import List, only: [last: 1]
IO.inspect last([1,2,3])
# IO.inspect first([1,2,3]) #=> (CompileError) temp.exs:3: undefined function first/1 (there is no such import)
!elixir temp.exs
# import には :functions と :macros という2つの特別なアトムもありるこれらはそれぞれ関数とマクロのみを取り込む
# import List, only: :functions
# import List, only: :macros
# require と import の違いがわからない
# まだロードされていないマクロを呼びだそうとすると、Elixirはエラーを発生させる
# とのこと
# defmodule Example do
# require SuperMacros
#
# SuperMacros.do_stuff
# end
# use
# use マクロを用いることで他のモジュールを利用して現在のモジュールの定義を変更することができる
# コード上で use を呼び出すと、実際には提供されたモジュールに定義されている
# __using__/1 コールバックを呼び出している
%%writefile temp.exs
defmodule Hello do
defmacro __using__ _ do
quote do
def hello(name), do: "Hi, #{name}"
end
end
end
defmodule Example do
use Hello
end
IO.puts Example.hello("Sean")
!elixir temp.exs
# greeting オプションを追加する
%%writefile temp.exs
defmodule Hello do
defmacro __using__(opts) do
greeting = Keyword.get(opts, :greeting, "Hi")
quote do
def hello(name), do: unquote(greeting) <> ", " <> name
end
end
end
defmodule Example do
use Hello, greeting: "Hola"
end
IO.puts Example.hello("Sean")
!elixir temp.exs
```
# Mix
```
# mixとは Ruby の Bundler, RubyGems, Rake が組み合わさったようなもの
# colab の環境でやってみる
!mix new example
#=>
# * creating README.md
# * creating .formatter.exs
# * creating .gitignore
# * creating mix.exs
# * creating lib
# * creating lib/example.ex
# * creating test
# * creating test/test_helper.exs
# * creating test/example_test.exs
#
# Your Mix project was created successfully.
# You can use "mix" to compile it, test it, and more:
#
# cd example
# mix test
#
# Run "mix help" for more commands.
# colab 環境ではシステムコマンドを 1 行の中で書かないとディレクトリ内の処理ができない
!cd example; mix test
!cd example; ls -la
!cd example; cat mix.exs
#=> 次のフォーマットのプログラムが出来る
# defmodule Example.MixProject do
# use Mix.Project
# def project do # 名前(app)と依存関係(deps)が書かれている
# def application do
# defp deps do
# end
!cd example; iex -S mix
# iex で対話的に使うことが出来るが colab 環境では出来ない
# cd example
# iex -S mix
# compile
# mix はコードの変更を自動的にコンパイルする
# 明示的にコンパイルすることも出来る
# !cd example; mix compile
# rootディレクトリ以外から実行する場合は、グローバルmix taskのみが実行可能
!cd example; mix compile
!cd example; ls -la
!cd example; ls -laR _build
# 依存関係を管理する
# 新しい依存関係を追加するには、 mix.exs の deps 内に追加する
# パッケージ名のアトムと、バージョンを表す文字列)と1つの任意的な値(オプション)を持つタプル
# 実例として、phoenix_slimのようなプロジェクトの依存関係を見る
# def deps do
# [
# {:phoenix, "~> 1.1 or ~> 1.2"},
# {:phoenix_html, "~> 2.3"},
# {:cowboy, "~> 1.0", only: [:dev, :test]},
# {:slime, "~> 0.14"}
# ]
# end
# cowboy の依存は開発時とテスト時にのみ必要
# 依存しているパッケージの取り込みは bundle install に似たもの
# mix deps.get
!cd example/_build/test/lib/example/ebin; ./example.app #=> Permission denied
# colab 環境ではアプリは起動できないと言う事か
# 環境
# Bundler に似て、様々な環境に対応している
# mixは最初から 3 つの環境で動作するように構成されている
# :dev - 初期状態での環境。
# :test - mix testで用いられる環境。次のレッスンでさらに見ていきる
# :prod - アプリケーションを製品に出荷するときに用いられる環境。
# 現在の環境は Mix.env で取得することができる
# この環境は MIX_ENV 環境変数によって変更することが出来る
# MIX_ENV=prod mix compile
```
# Sigils
```
# シギル sigil とは elixir で文字列リテラルを取り扱うための特別の構文
# チルダ ~ で始まる
# シギルのリスト
# ~C エスケープや埋め込みを含まない文字のリストを生成する
# ~c エスケープや埋め込みを含む文字のリストを生成する
# ~R エスケープや埋め込みを含まない正規表現を生成する
# ~r エスケープや埋め込みを含む正規表現を生成する
# ~S エスケープや埋め込みを含まない文字列を生成する
# ~s エスケープや埋め込みを含む文字列を生成する
# ~W エスケープや埋め込みを含まない単語のリストを生成する
# ~w エスケープや埋め込みを含む単語のリストを生成する
# ~N NaiveDateTime 構造体を生成する
# デリミタのリスト
# <...> カギ括弧のペア angle bracket
# {...} 中括弧のペア brace
# [...] 大括弧のペア bracket
# (...) 小括弧のペア parenthesis
# |...| パイプ記号のペア pipe
# /.../ スラッシュのペア slash
# "..." ダブルクォートのペア double quote
# '...' シングルクォートのペア single quote
# 文字のリスト #=> tutorial と結果が違う!!!!
!elixir -e 'IO.puts ~c/2 + 7 = #{ 2 + 7 }/'
!elixir -e 'IO.puts ~C/2 + 7 = #{ 2 + 7 }/'
# 正規表現
!elixir -e 'IO.puts 3 == 3'
!elixir -e 'IO.puts "Elixir" =~ ~r/elixir/'
!elixir -e 'IO.puts "elixir" =~ ~r/elixir/'
!echo
!elixir -e 'IO.puts "Elixir" =~ ~r/elixir/i'
!elixir -e 'IO.puts "elixir" =~ ~r/elixir/i'
# Erlang の正規表現ライブラリを元に作られた Regex.split/2 を使う
!elixir -e 'string="100_000_000"; IO.inspect Regex.split(~r/_/, string)'
# 文字列
!elixir -e 'IO.puts ~s/welcome to elixir #{String.downcase "SCHOOL"}/'
!elixir -e 'IO.puts ~S/welcome to elixir #{String.downcase "SCHOOL"}/'
# 単語のリスト
!elixir -e 'IO.inspect ~w/i love elixir school/'
!elixir -e 'IO.inspect ~w/i love\telixir school/'
!elixir -e 'IO.inspect ~W/i love\telixir school/'
!elixir -e 'name="Bob"; IO.inspect ~w/i love #{name}lixir school/'
!elixir -e 'name="Bob"; IO.inspect ~W/i love #{name}lixir school/'
# NaiveDateTime
# NaiveDateTime は タイムゾーンがない DateTime を表現する構造体を手早く作るときに有用
# NaiveDateTime 構造体を直接作ることは避けるべき
# パターンマッチングには有用
!elixir -e 'IO.inspect NaiveDateTime.from_iso8601("2015-01-23 23:50:07") == {:ok, ~N[2015-01-23 23:50:07]}'
# シギルを作る
%%writefile temp.exs
defmodule MySigils do
def sigil_u(string, []), do: String.upcase(string)
end
defmodule Main do
import MySigils
IO.puts (~u/elixir school/)
end
!elixir temp.exs
```
**Documentation**
**Attributes for inline documentation**
* @moduledoc - module-level documentation
* @doc - function-level documentation
Omitted in the original notes; a minimal sketch follows.
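Since the notes skip this, here is a minimal sketch of the two attributes (the module and function names are made up for illustration):
```
%%writefile temp.exs
defmodule MathDocs do
  @moduledoc """
  Small arithmetic helpers, used here only to illustrate @moduledoc.
  """

  @doc """
  Adds two numbers.
  """
  def add(a, b), do: a + b
end

IO.puts MathDocs.add(1, 2)
!elixir temp.exs
```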
**Testing**
ExUnit
Omitted.
# Comprehensions
```
# 内包表記 list comprehension
# 内包表記は列挙体 enumerable をループするための糖衣構文である
!elixir -e 'list=[1,2,3,4,5];IO.inspect for x <- list, do: x*x'
# for とジェネレータの使い方に留意する
# ジェネレータとは `x <- list` の部分
# Haskell だと [x * x | x <- list] と書き、数学の集合での表記に近いが Elixir ではこのように書く
# 内包表記はリストに限定されない
# キーワードリスト
!elixir -e 'IO.inspect for {_key, val} <- [one: 1, two: 2, three: 3], do: val'
# マップ
!elixir -e 'IO.inspect for {k, v} <- %{"a" => "A", "b" => "B"}, do: {k, v}'
# バイナリ
!elixir -e 'IO.inspect for <<c <- "hello">>, do: <<c>>'
# ジェネレータは入力値セットと左辺の変数を比較するのにパターンマッチングを利用している
# マッチするものが見つからない場合には、値は無視される
!elixir -e 'IO.inspect for {:ok, val} <- [ok: "Hello", error: "Unknown", ok: "World"], do: val'
# 入れ子
%%writefile temp.exs
list = [1, 2, 3, 4]
IO.inspect (
for n <- list, times <- 1..n do
String.duplicate("*", times)
end
)
!elixir temp.exs
# ループの見える化
!elixir -e 'list = [1, 2, 3, 4]; for n <- list, times <- 1..n, do: IO.puts "#{n} - #{times}"'
# フィルタ
!elixir -e 'import Integer; IO.inspect for x <- 1..10, is_even(x), do: x'
# 偶数かつ 3 で割り切れる値のみをフィルタ
%%writefile temp.exs
import Integer
IO.inspect (
for x <- 1..100,
is_even(x),
rem(x, 3) == 0, do: x)
!elixir temp.exs
# :into の使用
# 他のものを生成したい場合
# :into は Collectable プロトコルを実装している構造体を指定する
# :into を用いて、キーワードリストからマップを作成する
!elixir -e 'IO.inspect for {k, v} <- [one: 1, two: 2, three: 3], into: %{}, do: {k, v}'
!elixir -e 'IO.inspect %{:one => 1, :three => 2, :two => 2}'
!elixir -e 'IO.inspect %{"one" => 1, "three" => 2, "two" => 2}'
# なるほど、と言うかわからなくて当然ですね。多分、Erlang の仕様を引き継いでこのようになっているのだろう
# map では高速なプログラムができなくて、キーワードリストを作って、キーワードリストはリストでありマップなのだろう
# ビット文字列 bitstring は列挙可能 enumerable なので、:into を用いて文字列を作成することが出来る
!elixir -e "IO.inspect for c <- [72, 101, 108, 108, 111], into: \"\", do: <<c>>"
```
# Strings
```
# 文字列 string
# elixir の文字列はバイトのシーケンスである
!elixir -e 'string = <<104,101,108,108,111>>;IO.puts string'
!elixir -e 'string = <<104,101,108,108,111>>;IO.inspect string'
!elixir -e 'IO.inspect <<104,101,108,108,111>>'
!echo
# 文字列に 0 バイトを追加するとバイナリとして表示される
!elixir -e 'IO.inspect <<104,101,108,108,111,0>>'
# 質問 文字列をバイナリ表示するにはどうするか
!elixir -e 'IO.inspect "hello"<> <<0>>'
# 実験 日本語
!elixir -e 'IO.inspect "あ"<> <<0>>' #=> <<227, 129, 130, 0>>
!elixir -e 'IO.inspect <<227, 129, 130>>' #=> "あ"
# 文字リスト
# elixir は文字列と別に文字リストという型を別に持っている
# 文字列はダブルクオートで生成され、文字リストはシングルクオートで生成される
# 文字リストは utf-8 で、文字列はバイナリである
!elixir -e "IO.inspect 'hello'"
!elixir -e "IO.inspect 'hello' ++ [0]"
!elixir -e 'IO.inspect "hello"<> <<0>>'
!echo
!elixir -e "IO.inspect 'hełło' ++ [0]"
!elixir -e 'IO.inspect "hełło"<> <<0>>'
!echo
!elixir -e "IO.inspect 'あ' ++ [0]"
!elixir -e 'IO.inspect "あ"<> <<0>>'
# クエスチョンマークによるコードポイントの取得
# コードポイントは unicode なので 1 バイト以上のバイトである
!elixir -e 'IO.inspect ?Z'
!elixir -e 'IO.inspect ?あ'
!elixir -e 'IO.inspect "áñèane" <> <<0>>'
!elixir -e "IO.inspect 'áñèane' ++ [0]"
!elixir -e "IO.inspect 'あいう' ++ [0]"
# シンボルには ? 表記が使える
# elixir でプログラムする時は通常文字リストは使わず文字列を使う
# 文字リストが必要なのは erlang のため
# String モジュールにコードポイントを取得する関数 graphemes/1 と codepoints/1 がある
!elixir -e 'string = "\u0061\u0301"; IO.puts string' #=> á
!elixir -e 'string = "\u0061\u0301"; IO.inspect String.codepoints string'
!elixir -e 'string = "\u0061\u0301"; IO.inspect String.graphemes string'
# 下記の実験から á と あ は違う
# á は graphemes では 1 文字だが codepoints では 2 文字
# あ はどちらでも 1 文字
!elixir -e 'string = "あいう"; IO.puts string'
!elixir -e 'string = "あいう"; IO.inspect String.codepoints string'
!elixir -e 'string = "あいう"; IO.inspect String.graphemes string'
# 文字列関数
# length/1
!elixir -e 'IO.puts String.length "hello"'
!elixir -e 'IO.puts String.length "あいう"'
# replace/3
!elixir -e 'IO.puts String.replace("Hello", "e", "a")'
# duplicate/2
!elixir -e 'IO.puts String.duplicate("Oh my ", 3)'
# split/2
!elixir -e 'IO.inspect String.split("Oh my ", " ")'
# split/1 # こちらが words 相当か
!elixir -e 'IO.inspect String.split("Oh my ")'
# 問題 アナグラムチェック
# A = super
# B = perus
# 文字列 A を並び替えれば B に出来るので A は B のアナグラム
%%writefile temp.exs
defmodule Anagram do
def anagrams?(a, b) when is_binary(a) and is_binary(b) do
sort_string(a) == sort_string(b)
end
def sort_string(string) do
string
|> String.downcase()
|> String.graphemes()
|> Enum.sort()
end
end
defmodule Main do
IO.puts Anagram.anagrams?("Hello", "ohell")
IO.puts Anagram.anagrams?("María", "íMara")
IO.puts Anagram.anagrams?(3, 5) #=> エラー
end
!elixir temp.exs
```
# Date and time
```
# 日付と時間
# 現在時刻の取得
!elixir -e 'IO.puts Time.utc_now'
# シギルで Time 構造体を作る
!elixir -e 'IO.puts ~T[21:00:27.472988]'
# hour, minute, second
!elixir -e 't = ~T[21:00:27.472988];IO.puts t.hour'
!elixir -e 't = ~T[21:00:27.472988];IO.puts t.minute'
!elixir -e 't = ~T[21:00:27.472988];IO.puts t.second'
# Date
!elixir -e 'IO.puts Date.utc_today'
# シギルで Date 構造体を作る
!elixir -e 'IO.puts ~D[2022-03-22]'
#
!elixir -e '{:ok, date} = Date.new(2020, 12,12); IO.puts date'
!elixir -e '{:ok, date} = Date.new(2020, 12,12); IO.puts Date.day_of_week date'
!elixir -e '{:ok, date} = Date.new(2020, 12,12); IO.puts Date.leap_year? date'
!echo
# NaiveDateTime Date と Time の両方を扱えるがタイムゾーンのサポートがない
!elixir -e 'IO.puts NaiveDateTime.utc_now'
!elixir -e 'IO.puts ~N[2022-03-22 21:14:23.371420]'
!elixir -e 'IO.puts NaiveDateTime.add(~N[2022-03-22 21:14:23.371420],30)'
!elixir -e 'IO.puts NaiveDateTime.add(~N[2022-03-22 21:14:23],30)'
# DateTime
# DateTime は Date と Time の両方を扱えタイムゾーンのサポートがある
# しかし!!!! Elixir がデフォルトではタイムゾーンデータベースがない
# デフォルトでは Calendar.get_time_zone_database/0 によって返されるタイムゾーンデータベースを使う
# デフォルトでは Calendar.UTCOnlyTimeZoneDatabase で、Etc/UTC のみを処理し
# 他のタイムゾーンでは {:error, :utc_only_time_zone_database} を返す
# タイムゾーンを提供することにより NaiveDateTime から DateTimeのインスタンスを作ることができる
!elixir -e 'IO.inspect DateTime.from_naive(~N[2016-05-24 13:26:08.003], "Etc/UTC")'
# タイムゾーンの利用
# elixir でタイムゾーンを利用するには tzdata パッケージをインストールし
# Tzdata タイムゾーンデータベースとして使用する
# パリのタイムゾーンで時間を作成してそれをニューヨーク時間に変換してみる
# パリとニューヨークの時差は 6 時間である
# %%writefile temp.exs
# config :elixir, :time_zone_database, Tzdata.TimeZoneDatabase
# paris_datetime = DateTime.from_naive!(~N[2019-01-01 12:00:00], "Europe/Paris")
# {:ok, ny_datetime} = DateTime.shift_zone(paris_datetime, "America/New_York")
# IO.inspect paris_datetime
# IO.inspect ny_datetime
```
# Custom Mix tasks (omitted)
# Bookmark: currently here
# IEx helpers (omitted)
| github_jupyter |
```
# Import modules
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# Plot configurations
%matplotlib inline
# Notebook auto reloads code.
%load_ext autoreload
%autoreload 2
```
# NeuroTorch Tutorial
**NeuroTorch** is a framework for reconstructing neuronal morphology from
optical microscopy images. It interfaces PyTorch with different
automated neuron tracing algorithms for fast, accurate, scalable
neuronal reconstructions. It uses deep learning to generate an initial
segmentation of neurons in optical microscopy images. This
segmentation is then traced using various automated neuron tracing
algorithms to convert the segmentation into an SWC file—the most
common neuronal morphology file format. NeuroTorch is designed with
scalability in mind and can handle teravoxel-sized images.
This IPython notebook will outline a brief tutorial for using NeuroTorch
to train and predict on image volume datasets.
## Creating image datasets
One of NeuroTorch’s key features is its dynamic approach to volumetric datasets, which allows it to handle teravoxel-sized images without worrying about memory concerns and efficiency. Everything is loaded just-in-time based on when it is needed or expected to be needed. To load an image dataset, we need
to specify the voxel coordinates of each image file as shown in files `inputs_spec.json` and `labels_spec.json`.
### `inputs_spec.json`
```json
[
{
"filename" : "inputs.tif",
"bounding_box" : [[0, 0, 0], [1024, 512, 50]]
},
{
"filename" : "inputs.tif",
"bounding_box" : [[0, 0, 50], [1024, 512, 100]]
}
]
```
### `labels_spec.json`
```json
[
{
"filename" : "labels.tif",
"bounding_box" : [[0, 0, 0], [1024, 512, 50]]
},
{
"filename" : "labels.tif",
"bounding_box" : [[0, 0, 50], [1024, 512, 100]]
}
]
```
## Loading image datasets
Now that the image datasets for the inputs and labels have been specified,
these datasets can be loaded with NeuroTorch.
```
from neurotorch.datasets.specification import JsonSpec
import os
IMAGE_PATH = '../../tests/images/'
json_spec = JsonSpec() # Initialize the JSON specification
# Create a dataset containing the inputs
inputs = json_spec.open(os.path.join(IMAGE_PATH,
"inputs_spec.json"))
# Create a dataset containing the labels
labels = json_spec.open(os.path.join(IMAGE_PATH,
"labels_spec.json"))
```
## Augmenting datasets
With the image datasets, it is possible to augment data on-the-fly. To implement an augmentation, such as branch occlusion, instantiate an aligned volume and specify the augmentation with the aligned volume.
```
from neurotorch.datasets.dataset import AlignedVolume
from neurotorch.augmentations.occlusion import Occlusion
from neurotorch.augmentations.blur import Blur
from neurotorch.augmentations.brightness import Brightness
from neurotorch.augmentations.dropped import Drop
from neurotorch.augmentations.duplicate import Duplicate
from neurotorch.augmentations.stitch import Stitch
volume = AlignedVolume([inputs, labels])
augmented_volume = Occlusion(volume, frequency=0.5)
augmented_volume = Stitch(augmented_volume, frequency=0.5)
augmented_volume = Drop(augmented_volume, frequency=0.5)
augmented_volume = Blur(augmented_volume, frequency=0.5)
augmented_volume = Duplicate(augmented_volume, frequency=0.5)
```
## Training with the image datasets
To train a neural network using these image datasets, load the
neural network architecture and initialize a `Trainer`. To save
training checkpoints, add a `CheckpointWriter` to the `Trainer` object.
Lastly, call the `Trainer` object to run training.
```
from neurotorch.core.trainer import Trainer
from neurotorch.nets.RSUNet import RSUNet
from neurotorch.training.checkpoint import CheckpointWriter
from neurotorch.training.logging import ImageWriter, LossWriter
net = RSUNet() # Initialize the U-Net architecture
# Setup the trainer
trainer = Trainer(net, augmented_volume, max_epochs=10,
gpu_device=0)
# Wrap the trainer with loggers and a checkpoint writer (checkpoint every 50 iterations)
trainer = LossWriter(trainer, ".", "tutorial_tensorboard")
trainer = ImageWriter(trainer, ".", "tutorial_tensorboard")
trainer = CheckpointWriter(trainer, checkpoint_dir='.',
checkpoint_period=50)
trainer.run_training()
```
## Predicting using NeuroTorch
Once training has completed, we can use the training checkpoints
to predict on image datasets. We first have to
load the neural network architecture and image volume.
We then have to initialize a `Predictor` object and an output volume.
Once these have been specified, we can begin prediction.
```
from neurotorch.nets.RSUNet import RSUNet
from neurotorch.core.predictor import Predictor
from neurotorch.datasets.filetypes import TiffVolume
from neurotorch.datasets.dataset import Array
from neurotorch.datasets.datatypes import (BoundingBox, Vector)
import numpy as np
import tifffile as tif
import os
IMAGE_PATH = '../../tests/images/'
net = RSUNet() # Initialize the U-Net architecture
checkpoint = './iteration_1000.ckpt' # Specify the checkpoint path
with TiffVolume(os.path.join(IMAGE_PATH,
"inputs.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50))) as inputs:
predictor = Predictor(net, checkpoint, gpu_device=0)
output_volume = Array(np.zeros(inputs.getBoundingBox()
.getNumpyDim(), dtype=np.float32))
predictor.run(inputs, output_volume, batch_size=5)
tif.imsave("test_prediction.tif",
output_volume.getArray().astype(np.float32))
```
## Displaying the prediction
Predictions are output in logits form. To map this to a
probability distribution, we need to apply a sigmoid function
to the prediction. We can then evaluate the prediction and
ground-truth.
```
# Apply sigmoid function
probability_map = 1/(1+np.exp(-output_volume.getArray()))
# Plot prediction and ground-truth
plt.subplot(2, 1, 1)
plt.title('Prediction')
plt.imshow(output_volume.getArray()[25])
plt.axis('off')
plt.subplot(2, 1, 2)
plt.title('Ground-Truth')
plt.imshow(labels.get(
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50))).getArray()[25],
cmap='gray'
)
plt.axis('off')
plt.show()
```
| github_jupyter |
This IPython Notebook introduces the use of the `openmc.mgxs` module to calculate multi-group cross sections for an infinite homogeneous medium. In particular, this Notebook introduces the following features:
* **General equations** for scalar-flux averaged multi-group cross sections
* Creation of multi-group cross sections for an **infinite homogeneous medium**
* Use of **tally arithmetic** to manipulate multi-group cross sections
## Introduction to Multi-Group Cross Sections (MGXS)
Many Monte Carlo particle transport codes, including OpenMC, use continuous-energy nuclear cross section data. However, most deterministic neutron transport codes use *multi-group cross sections* defined over discretized energy bins or *energy groups*. An example of U-235's continuous-energy fission cross section along with a 16-group cross section computed for a light water reactor spectrum is displayed below.
```
from IPython.display import Image
Image(filename='images/mgxs.png', width=350)
```
A variety of tools employing different methodologies have been developed over the years to compute multi-group cross sections for certain applications, including NJOY (LANL), MC$^2$-3 (ANL), and Serpent (VTT). The `openmc.mgxs` Python module is designed to leverage OpenMC's tally system to calculate multi-group cross sections with arbitrary energy discretizations for fine-mesh heterogeneous deterministic neutron transport applications.
Before proceeding to illustrate how one may use the `openmc.mgxs` module, it is worthwhile to define the general equations used to calculate multi-group cross sections. This is only intended as a brief overview of the methodology used by `openmc.mgxs` - we refer the interested reader to the large body of literature on the subject for a more comprehensive understanding of this complex topic.
### Introductory Notation
The continuous real-valued microscopic cross section may be denoted $\sigma_{n,x}(\mathbf{r}, E)$ for position vector $\mathbf{r}$, energy $E$, nuclide $n$ and interaction type $x$. Similarly, the scalar neutron flux may be denoted by $\Phi(\mathbf{r},E)$ for position $\mathbf{r}$ and energy $E$. **Note**: Although nuclear cross sections are dependent on the temperature $T$ of the interacting medium, the temperature variable is neglected here for brevity.
### Spatial and Energy Discretization
The energy domain for critical systems such as thermal reactors spans more than 10 orders of magnitude of neutron energies, from 10$^{-5}$ to 10$^{7}$ eV. The multi-group approximation discretizes this energy range into one or more energy groups. In particular, for $G$ total groups, we denote an energy group index $g$ such that $g \in \{1, 2, ..., G\}$. The group indices are defined such that the smaller the index, the higher the energy, and vice versa. The integration over neutron energies across a discrete energy group is commonly referred to as **energy condensation**.
Multi-group cross sections are computed for discretized spatial zones in the geometry of interest. The spatial zones may be defined on a structured and regular fuel assembly or pin cell mesh, an arbitrary unstructured mesh or the constructive solid geometry used by OpenMC. For a geometry with $K$ distinct spatial zones, we designate each spatial zone an index $k$ such that $k \in \{1, 2, ..., K\}$. The volume of each spatial zone is denoted by $V_{k}$. The integration over discrete spatial zones is commonly referred to as **spatial homogenization**.
### General Scalar-Flux Weighted MGXS
The multi-group cross sections computed by `openmc.mgxs` are defined as a *scalar flux-weighted average* of the microscopic cross sections across each discrete energy group. This formulation is employed in order to preserve the reaction rates within each energy group and spatial zone. In particular, spatial homogenization and energy condensation are used to compute the general multi-group cross section $\sigma_{n,x,k,g}$ as follows:
$$\sigma_{n,x,k,g} = \frac{\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\sigma_{n,x}(\mathbf{r},E')\Phi(\mathbf{r},E')}{\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\Phi(\mathbf{r},E')}$$
This scalar flux-weighted average microscopic cross section is computed by `openmc.mgxs` for most multi-group cross sections, including total, absorption, and fission reaction types. These double integrals are stochastically computed with OpenMC's tally system - in particular, [filters](http://openmc.readthedocs.io/en/latest/usersguide/tallies.html#filters) on the energy range and spatial zone (material, cell or universe) define the bounds of integration for both numerator and denominator.
### Multi-Group Scattering Matrices
The general multi-group cross section $\sigma_{n,x,k,g}$ is a vector of $G$ values for each energy group $g$. The equation presented above only discretizes the energy of the incoming neutron and neglects the outgoing energy of the neutron (if any). Hence, this formulation must be extended to account for the outgoing energy of neutrons in the discretized scattering matrix cross section used by deterministic neutron transport codes.
We denote the incoming and outgoing neutron energy groups as $g$ and $g'$ for the microscopic scattering matrix cross section $\sigma_{n,s}(\mathbf{r},E)$. As before, spatial homogenization and energy condensation are used to find the multi-group scattering matrix cross section $\sigma_{n,s,k,g \to g'}$ as follows:
$$\sigma_{n,s,k,g\rightarrow g'} = \frac{\int_{E_{g'}}^{E_{g'-1}}\mathrm{d}E''\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\sigma_{n,s}(\mathbf{r},E'\rightarrow E'')\Phi(\mathbf{r},E')}{\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\Phi(\mathbf{r},E')}$$
This scalar flux-weighted multi-group microscopic scattering matrix is computed using OpenMC tallies with both energy in and energy out filters.
### Multi-Group Fission Spectrum
The energy spectrum of neutrons emitted from fission is denoted by $\chi_{n}(\mathbf{r},E' \rightarrow E'')$ for incoming and outgoing energies $E'$ and $E''$, respectively. Unlike the multi-group cross sections $\sigma_{n,x,k,g}$ considered up to this point, the fission spectrum is a probability distribution and must sum to unity. The outgoing energy is typically much less dependent on the incoming energy for fission than for scattering interactions. As a result, it is common practice to integrate over the incoming neutron energy when computing the multi-group fission spectrum. The fission spectrum may be simplified as $\chi_{n}(\mathbf{r},E)$ with outgoing energy $E$.
Unlike the multi-group cross sections defined up to this point, the multi-group fission spectrum is weighted by the fission production rate rather than the scalar flux. This formulation is intended to preserve the total fission production rate in the multi-group deterministic calculation. In order to mathematically define the multi-group fission spectrum, we denote the microscopic fission cross section as $\sigma_{n,f}(\mathbf{r},E)$ and the average number of neutrons emitted from fission interactions with nuclide $n$ as $\nu_{n}(\mathbf{r},E)$. The multi-group fission spectrum $\chi_{n,k,g}$ is then the probability of fission neutrons emitted into energy group $g$.
Similar to before, spatial homogenization and energy condensation are used to find the multi-group fission spectrum $\chi_{n,k,g'}$ for outgoing energy group $g'$ as follows:
$$\chi_{n,k,g'} = \frac{\int_{E_{g'}}^{E_{g'-1}}\mathrm{d}E''\int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\chi_{n}(\mathbf{r},E'\rightarrow E'')\nu_{n}(\mathbf{r},E')\sigma_{n,f}(\mathbf{r},E')\Phi(\mathbf{r},E')}{\int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\nu_{n}(\mathbf{r},E')\sigma_{n,f}(\mathbf{r},E')\Phi(\mathbf{r},E')}$$
The fission production-weighted multi-group fission spectrum is computed using OpenMC tallies with both energy in and energy out filters.
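A minimal sketch of the corresponding tallies (again illustrative, with placeholder group edges and cell ID) pairs a 'nu-fission' tally filtered on outgoing energy for the numerator with one filtered on incoming energy for the denominator:
```
# Illustrative sketch only; group edges and cell ID are assumptions.
import openmc

group_edges = [0., 0.625, 20.0e6]
cell_filter = openmc.CellFilter([1])

# Numerator: fission production binned by outgoing energy group
nu_fission_out = openmc.Tally(name='nu-fission out')
nu_fission_out.filters = [cell_filter, openmc.EnergyoutFilter(group_edges)]
nu_fission_out.scores = ['nu-fission']

# Denominator: total fission production in the spatial zone
nu_fission_in = openmc.Tally(name='nu-fission in')
nu_fission_in.filters = [cell_filter, openmc.EnergyFilter(group_edges)]
nu_fission_in.scores = ['nu-fission']
```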
This concludes our brief overview on the methodology to compute multi-group cross sections. The following sections detail more concretely how users may employ the `openmc.mgxs` module to power simulation workflows requiring multi-group cross sections for downstream deterministic calculations.
## Generate Input Files
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import openmc
import openmc.mgxs as mgxs
```
We begin by creating a material for the homogeneous medium.
```
# Instantiate a Material and register the Nuclides
inf_medium = openmc.Material(name='moderator')
inf_medium.set_density('g/cc', 5.)
inf_medium.add_nuclide('H1', 0.028999667)
inf_medium.add_nuclide('O16', 0.01450188)
inf_medium.add_nuclide('U235', 0.000114142)
inf_medium.add_nuclide('U238', 0.006886019)
inf_medium.add_nuclide('Zr90', 0.002116053)
```
With our material, we can now create a `Materials` object that can be exported to an actual XML file.
```
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([inf_medium])
materials_file.export_to_xml()
```
Now let's move on to the geometry. This problem will be a simple square cell with reflective boundary conditions to simulate an infinite homogeneous medium. The first step is to create the outer bounding surfaces of the problem.
```
# Instantiate boundary Planes
min_x = openmc.XPlane(boundary_type='reflective', x0=-0.63)
max_x = openmc.XPlane(boundary_type='reflective', x0=0.63)
min_y = openmc.YPlane(boundary_type='reflective', y0=-0.63)
max_y = openmc.YPlane(boundary_type='reflective', y0=0.63)
```
With the surfaces defined, we can now create a cell that is defined by intersections of half-spaces created by the surfaces.
```
# Instantiate a Cell
cell = openmc.Cell(cell_id=1, name='cell')
# Register bounding Surfaces with the Cell
cell.region = +min_x & -max_x & +min_y & -max_y
# Fill the Cell with the Material
cell.fill = inf_medium
```
OpenMC requires that there is a "root" universe. Let us create a root universe and add our square cell to it.
```
# Create root universe
root_universe = openmc.Universe(name='root universe', cells=[cell])
```
We now must create a geometry that is assigned a root universe and export it to XML.
```
# Create Geometry and set root Universe
openmc_geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
openmc_geometry.export_to_xml()
```
Next, we must define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles.
```
# OpenMC simulation parameters
batches = 50
inactive = 10
particles = 2500
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': True}
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.63, -0.63, -0.63, 0.63, 0.63, 0.63]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
# Export to "settings.xml"
settings_file.export_to_xml()
```
Now we are ready to generate multi-group cross sections! First, let's define a 2-group structure using the built-in `EnergyGroups` class.
```
# Instantiate a 2-group EnergyGroups object
groups = mgxs.EnergyGroups()
groups.group_edges = np.array([0., 0.625, 20.0e6])
```
We can now use the `EnergyGroups` object, along with our previously created materials and geometry, to instantiate some `MGXS` objects from the `openmc.mgxs` module. In particular, the following are subclasses of the generic and abstract `MGXS` class:
* `TotalXS`
* `TransportXS`
* `AbsorptionXS`
* `CaptureXS`
* `FissionXS`
* `KappaFissionXS`
* `ScatterXS`
* `ScatterMatrixXS`
* `Chi`
* `ChiPrompt`
* `InverseVelocity`
* `PromptNuFissionXS`
Of course, we are aware that the fission cross section (`FissionXS`) can sometimes be paired with the fission neutron multiplication to become $\nu\sigma_f$. This can be accommodated in the `FissionXS` class by setting the `nu` parameter to `True`, in the same way as shown below for scattering.
Additionally, scattering reactions (such as (n,2n)) can also be defined to take into account the neutron multiplication and become $\nu\sigma_s$. This can be accommodated in the transport (`TransportXS`), scattering (`ScatterXS`), and scattering-matrix (`ScatterMatrixXS`) cross section types by setting the `nu` parameter to `True` as shown below.
These classes provide us with an interface to generate the tally inputs as well as perform post-processing of OpenMC's tally data to compute the respective multi-group cross sections. In this case, let's create the multi-group total, absorption and scattering cross sections with our 2-group structure.
```
# Instantiate a few different sections
total = mgxs.TotalXS(domain=cell, groups=groups)
absorption = mgxs.AbsorptionXS(domain=cell, groups=groups)
scattering = mgxs.ScatterXS(domain=cell, groups=groups)
# Note that if we wanted to incorporate neutron multiplication in the
# scattering cross section we would write the previous line as:
# scattering = mgxs.ScatterXS(domain=cell, groups=groups, nu=True)
```
Each multi-group cross section object stores its tallies in a Python dictionary called `tallies`. We can inspect the tallies in the dictionary for our `AbsorptionXS` object as follows.
```
absorption.tallies
```
The `AbsorptionXS` object includes tracklength tallies for the 'absorption' and 'flux' scores in the 2-group structure in cell 1. Now that each `MGXS` object contains the tallies that it needs, we must add these tallies to a `Tallies` object to generate the "tallies.xml" input file for OpenMC.
```
# Instantiate an empty Tallies object
tallies_file = openmc.Tallies()
# Add total tallies to the tallies file
tallies_file += total.tallies.values()
# Add absorption tallies to the tallies file
tallies_file += absorption.tallies.values()
# Add scattering tallies to the tallies file
tallies_file += scattering.tallies.values()
# Export to "tallies.xml"
tallies_file.export_to_xml()
```
Now we have a complete set of inputs, so we can go ahead and run our simulation.
```
# Run OpenMC
openmc.run()
```
## Tally Data Processing
Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a `StatePoint` object.
```
# Load the last statepoint file
sp = openmc.StatePoint('statepoint.50.h5')
```
In addition to the statepoint file, our simulation also created a summary file which encapsulates information about the materials and geometry. By default, a `Summary` object is automatically linked when a `StatePoint` is loaded. This is necessary for the `openmc.mgxs` module to properly process the tally data.
The statepoint is now ready to be analyzed by our multi-group cross sections. We simply have to load the tallies from the `StatePoint` into each object as follows and our `MGXS` objects will compute the cross sections for us under-the-hood.
```
# Load the tallies from the statepoint into each MGXS object
total.load_from_statepoint(sp)
absorption.load_from_statepoint(sp)
scattering.load_from_statepoint(sp)
```
Voila! Our multi-group cross sections are now ready to rock 'n roll!
## Extracting and Storing MGXS Data
Let's first inspect our total cross section by printing it to the screen.
```
total.print_xs()
```
Since the `openmc.mgxs` module uses [tally arithmetic](http://openmc.readthedocs.io/en/latest/examples/tally-arithmetic.html) under-the-hood, the cross section is stored as a "derived" `Tally` object. This means that it can be queried and manipulated using all of the same methods supported for the `Tally` class in the OpenMC Python API. For example, we can construct a [Pandas](http://pandas.pydata.org/) `DataFrame` of the multi-group cross section data.
```
df = scattering.get_pandas_dataframe()
df.head(10)
```
Each multi-group cross section object can be easily exported to a variety of file formats, including CSV, Excel, and LaTeX for storage or data processing.
```
absorption.export_xs_data(filename='absorption-xs', format='excel')
```
The following code snippet shows how to export all three `MGXS` to the same HDF5 binary data store.
```
total.build_hdf5_store(filename='mgxs', append=True)
absorption.build_hdf5_store(filename='mgxs', append=True)
scattering.build_hdf5_store(filename='mgxs', append=True)
```
## Comparing MGXS with Tally Arithmetic
Finally, we illustrate how one can leverage OpenMC's [tally arithmetic](http://openmc.readthedocs.io/en/latest/examples/tally-arithmetic.html) data processing feature with `MGXS` objects. The `openmc.mgxs` module uses tally arithmetic to compute multi-group cross sections with automated uncertainty propagation. Each `MGXS` object includes an `xs_tally` attribute which is a "derived" `Tally` based on the tallies needed to compute the cross section type of interest. These derived tallies can be used in subsequent tally arithmetic operations. For example, we can use tally arithmetic to confirm that the `TotalXS` is equal to the sum of the `AbsorptionXS` and `ScatterXS` objects.
```
# Use tally arithmetic to compute the difference between the total, absorption and scattering
difference = total.xs_tally - absorption.xs_tally - scattering.xs_tally
# The difference is a derived tally which can generate Pandas DataFrames for inspection
difference.get_pandas_dataframe()
```
Similarly, we can use tally arithmetic to compute the ratio of `AbsorptionXS` and `ScatterXS` to the `TotalXS`.
```
# Use tally arithmetic to compute the absorption-to-total MGXS ratio
absorption_to_total = absorption.xs_tally / total.xs_tally
# The absorption-to-total ratio is a derived tally which can generate Pandas DataFrames for inspection
absorption_to_total.get_pandas_dataframe()
# Use tally arithmetic to compute the scattering-to-total MGXS ratio
scattering_to_total = scattering.xs_tally / total.xs_tally
# The scattering-to-total ratio is a derived tally which can generate Pandas DataFrames for inspection
scattering_to_total.get_pandas_dataframe()
```
Lastly, we sum the derived scatter-to-total and absorption-to-total ratios to confirm that they sum to unity.
```
# Use tally arithmetic to ensure that the absorption- and scattering-to-total MGXS ratios sum to unity
sum_ratio = absorption_to_total + scattering_to_total
# The sum ratio is a derived tally which can generate Pandas DataFrames for inspection
sum_ratio.get_pandas_dataframe()
```
# Bungee Characterization Lab
## PH 211 COCC
### Bruce Emerson 1/20/2021
This notebook is meant to provide tools and discussion to support data analysis and presentation as you generate your lab reports.
[Bungee Characterization (Bungee I)](http://coccweb.cocc.edu/bemerson/PhysicsGlobal/Courses/PH211/PH211Materials/PH211Labs/PH211LabbungeeI.html) and [Bungee I Lab Discussion](http://coccweb.cocc.edu/bemerson/PhysicsGlobal/Courses/PH211/PH211Materials/PH211Labs/PH211LabDbungeeI.html)
In this lab we are gathering some data, entering the data into the notebook, plotting the data as a scatterplot, plotting a physics model of the bungee, and finally looking for patterns through normalizing the data.
For the formal lab report you will want to create your own description of what you understand the process and intended outcome of the lab is. Please don't just copy the purpose statement from the lab page.
## Dependencies
This is where we load in the various libraries of python tools that are needed for the particular work we are undertaking.
```numpy``` is a numerical tools library, often imported as ```np```. ```numpy``` also contains the statistical tools that we will use in this lab. There are other libraries dedicated to statistical tools, but ```numpy``` has everything we need.
```matplotlib``` is a 'MATLAB-like' library.
```matplotlib.pyplot``` is often imported as ```plt``` to make it easier to use. ```matplotlib``` has the plotting tools that we need for this lab.
The following code cell will need to be run first before any other code cells.
```
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
```
## Data Entry (Lists/Vectors)
As we learned last week we can manually enter our data in as lists. See last week's lab for reminders if needed. In this lab we are working with data pairs (x,y data). There are a number of ways of doing this but the most conceptually direct approach is to create an ordered list of the xdata and the ydata separately. Notice that I can 'fold' long lines of data by entering a new line after the comma. This is handy when manually entering data. The data shown here is completely manufactured but has some of the same characteristics as the data you are gathering.
Be aware that you will be gathering two sets of data yourself and getting a third data set from another group. Plan out how you will keep track of each data set with thoughtful naming choices.
### Comments in Code:
From this point going forward I will be looking for consistent description of what is happening in the code cells both within and before the code cell. You are of little value to a future employer if they can't hand your work to another employee who can make sense of what you did. A good metric is that you should spend at least as much effort commenting and explaining what you're doing as you do actually doing the work.
In a python code cell any line that starts with a '#' will be ignored by python and interpreted as a comment.
```# this is the actual data from your experiment```
This is a typical format of a comment that is easy to read in the code. It is sometimes helpful to comment at the end of a line to explain particular items in that line.
```ydata2 = [2., 3.] # I can also comment at the end of a line```
```
# this is the actual data from your experiment
xdata1 = [3.23961446, 12.3658087, 27.08638038, 36.88808393,
48.5373278, 43.90496472, 75.81073494, 105.42389529,
123.53497036, 158.87537602]
ydata1 = [0.62146893, 1.53513096, 3.97591135,
4.54284862, 6.23415512, 5.12951366,
6.1733864, 7.9524996, 8.90050684, 10.29383595]
# these are a couple of specific data point I want to scatterplot on top of my plot
xdata2 = [60., 100.]
ydata2 = [2., 3.] # I can also comment at the end of a line
# print out and check my data
print("stretch data:",xdata1)
print("force data:",ydata1)
```
## Number of Data Points:
Because we are scatter plotting the data we need to be sure that every x value has a related y value or the plotting routines will complain. Previously we learned to use the ```len()``` function to determine the number of data points in a list. We do that again here.
#### Extra: Conditional Statements:
It seems reasonable that we could ask python to check whether the two data sets are the same length and we can. There are a number of what are called conditional statements. The 'if-else' statement is one of these.
[if-else examples](https://pythonguides.com/python-if-else/)
```
if (xdata1length == ydata1length):
    print("Looks good:)")
else:
    print("Something is wrong here!!!")
```
Inside the parentheses is the conditional statement which, in this case, asks if ```xdata1length == ydata1length```. 'If' this statement is true then python will look at the next line(s) to see what it should do. If the conditional statement is false (not true) python will look for an ```else``` command and do whatever is on the lines after the else statement. Python expects that everything related to the ```if-else``` statement will be indented after the line where it begins. The next line of code (or even a blank line) that is NOT indented represents the end of the conditional statement. Just play with a few things in the statement if you have time and see what happens.
***
## Lab Deliverable:
For your lab notebook you will include the usual 'header' information we talked about last week in the first cell of the lab (a markdown cell for sure). After the header cell describe the process by which you collected data from your micro-bungee cord. The actual data can be entered directly into the code.
Insert an appropriate title and describe how you determined the variability of your data across the range of your data points. At some point in your description you need to articulate, in percentage terms, a numerical value for variability of your data that matches your description and data.
***
```
# determine the lengths of the data lists
xdata1length = len(xdata1)
ydata1length = len(ydata1)
# print out the lengths- visually check that they are the same
print("number of data points (x):", xdata1length)
print("number of data points (y):", ydata1length)
if (xdata1length == ydata1length):
print("Looks good:)")
else:
print("Something is wrong here!!!")
```
### Scatter Plots
Most data that we will want to analyze in physics is (x,y) data. For this type of data the typical plot type is called a scatter plot which is just what you think of when you plot individual data points.
To begin the process in python we need to create a container for the multiple plots we will be creating. One way (not the only way) to do this is with the ```plt.subplots``` function. This creates a container (called fig1 in this case) and a first set of axes (called ax1 in this case).
[pyplot subplots documentation](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html)
We can then layer multiple plots onto these axes (ax1) by plotting and replotting until we are ready to show the whole thing. In this cell I am only creating a single plot of the first data set.
[pyplot scatter documentation](https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.scatter.html)
To try to keep things clearer for myself I have typically defined a new figure and a new set of axes for each plot. You will find, if you look at samples from the web, that many coders just reuse the same labels over and over again. This works from a coding perspective but it violates a core expectation for all sciences that your code be clear in its communication. I encourage you to consider the choices you make in this regard.
```
# create a figure with a set of axes as we did with histograms
fig1, ax1 = plt.subplots()
# scatter plot data set 1
ax1.scatter(xdata1, ydata1)
# set up labels and titles for the plot and turn on the grid lines
ax1.set(xlabel='independent variable (units)', ylabel='dependent variable (units)',
title='My Data from Lab')
ax1.grid()
# Set the size of my plot for better visibility
fig1.set_size_inches(10, 9)
# uncomment this line if I want to save a png of the plot for other purposes
#fig1.savefig("myplot.png")
plt.show()
```
### Adding more data
When I want to add more data I just make another plot on a new set of axes. I have to start a new container (fig) because the ```plt.show()``` call blocks me from adding more information to the plot (there is something in this that is still not clear to me and perhaps soon I will figure it out).
```
# a new set of axes
fig2, ax2 = plt.subplots()
ax2.scatter(xdata1, ydata1, color = 'blue')
ax2.scatter(xdata2, ydata2, color = 'red')
ax2.set(xlabel='independent variable (units)', ylabel='dependent variable (units)',
title='My Data from Lab')
ax2.grid()
# Set the size of my plot for better visibility
fig2.set_size_inches(10, 9)
#fig.savefig("myplot.png")
plt.show()
```
### Discussion: Deliverable 2
The second deliverable asks you to consider the data from your plot(s) and describe whether it has features that are consistent with an ideal physics spring (Hooke's Law). Are some regions linear? ... sort of? Is the spring stiffer at the beginning or at the end of the data? Explain your answer. Do both sets of data show similar behavior? How or how not?
### Add physics model...
For the lab you are asked to draw straight lines that 'model' (describe) the behavior of the early and latter parts of your data sets. When we are creating physics models we are now generating 'data points' from a mathematical description. Again, there are a number of ways to do this but what I will show here is typical of physics and engineering models.
It starts by defining a set of x values. ```numpy.linspace()``` is a tool for doing this, and because we did ```import numpy as np``` it shows up in the code as ```np.linspace()```.
[numpy.linspace documentation](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)
What the function does is generate a list of values that are evenly distributed between 'begin' and 'end' in ```np.linspace('begin','end',# values)```
In this lab we are exploring linear models (Hooke's Law) for the behavior of the bungee (spring), which means we need a slope and a y-intercept. One of the nice features of the arrays that ```np.linspace()``` returns is that if I multiply an array by a number I get a new array with the same number of elements, each of which is multiplied by the number. Be careful: the calculation that looks like it's relating a single x and y value is really connecting a whole array of x and y values.
```
# actual model parameters - slope and intercept
model1slope = .12
model2slope = .045
model1int = 0.
model2int = 3.
# range of x values -- choose lower and upper limits of range
model1x = np.linspace(0.,50.,20)
model2x = np.linspace(30.,170.,20)
# in case you want to check how many values are generated
# modellength = len(model1x)
# print(modellength)
# generate y values from model
model1y = model1slope*model1x + model1int
model2y = model2slope*model2x + model2int
```
### Plotting References
There are a range of different marks that you can use to plot your data points on the scatter plot. Here is the link...
[marker types for scatter plot](https://matplotlib.org/3.1.0/api/markers_api.html#module-matplotlib.markers)
There are also a range of colors that you can use for all plots. I am not yet clear when some can or can't be used but here's the reference if you want to experiment...
[matplotlib named colors](https://matplotlib.org/3.1.0/gallery/color/named_colors.html)
When plotting lines (```ax2.plot()```) there are a few line styles you can use from solid lines to various dashed lines. Here's the reference....
[matplotlib line styles for plot](https://matplotlib.org/gallery/lines_bars_and_markers/line_styles_reference.html)
You will notice that I added a label to each plot. This is then picked up and attached to each plot and displayed in the legend. You can decide where to place the legend on the plot by choosing different values for 'loc'. Play with this to get a helpful placement.
```
fig3, ax3 = plt.subplots()
# scatter plot of the data
ax3.scatter(xdata1, ydata1, marker = 'x', color = 'black', label = "82 cm Bungee")
# draw the two lines that represent my model
ax3.plot(model1x, model1y, color = 'red', linestyle = ':', linewidth = 3., label = "initial")
ax3.plot(model2x, model2y, color = 'green', linestyle = '--', linewidth = 2., label = "tail")
# set up overall plot labels
ax3.set(xlabel='independent variable (units)', ylabel='dependent variable (units)',
        title='data and model')
ax3.grid()
# Set the size of my plot for better visibility
fig3.set_size_inches(10, 9)
# this creates a key to the meaning of the different symbols on the plot
plt.legend(loc= 4)
plt.show()
```
### Discussion: Deliverable III
So what does your plot above mean? What explanation of the behavior of the bungee is suggested by the two line fit?
### Normalization
Normalization is the process of trying to see if a particular feature of the data has a simple dependence. In this case each bungee is a different length but otherwise they seem like they would behave very similarly. To explore this question we normalize the stretch by dividing it by the original length of the cord. Do this for **both** sets of data and then replot.
The value of this normalization exercise comes from plotting the data from multiple bungees together. What I show here is the normalization of just one bungee. You will need to do 2 or 3 depending on how much data you have and plot them all simultaneously. Using different colors for each data set will help keep track of which ones are which.
You will note that I couldn't normalize by doing the obvious thing, ```xdata1norm = xdata1/length1```. Python doesn't like this because ```xdata1``` is a plain Python list rather than a numpy array (try it and look at the error message), so I had to hunt around and found this useful function. There may be other ways to accomplish this task but this works so that's where I'm going. As usual here is the documentation link:
[numpy.true_divide](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.true_divide.html)
```
length1 = 75.
xdata1norm = np.true_divide(xdata1,length1)
fig, axn = plt.subplots()
axn.scatter(xdata1norm, ydata1)
axn.set(xlabel='independent variable (units)', ylabel='dependent variable (units)',
title='My Data from Lab')
axn.grid()
#fig.savefig("myplot.png")
plt.show()
```
### Discussion: Deliverable V
What does it mean? What we expect is that the data for all the different bungees makes a single shape as opposed to a family of similar shapes. How might this help us predict the behavior of a bungee of a different length?
## Reflection
As usual I learned a bunch of new stuff in the process of creating this notebook as a framework for your lab report. Thanks.
### Extensions
Extensions are ideas that I didn't have time to explore or develop fully for this lab. These are offered as opportunities for students with more programming experience than is typical for students in the class.
#### Create a separate legend for reporting slopes of the fit lines
I feel like this might be nice at some future time.
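A rough sketch of one way to get there (an assumption, reusing ```fig3```, ```ax3```, and the slope variables from the earlier cells, and using a text box rather than a true second legend):
```
# Sketch only: report the model slopes in a text box layered on the earlier plot.
slope_report = "initial slope = {:.3f}\ntail slope = {:.3f}".format(model1slope, model2slope)
ax3.text(0.05, 0.95, slope_report, transform=ax3.transAxes, verticalalignment='top',
         bbox=dict(boxstyle='round', facecolor='white'))
fig3   # re-display the figure in the notebook
```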
#### Least squares fit of straight line to data
I am strongly in favor of drawing our own lines on the data because it makes us think about what the terms in the line mean in context. Nevertheless I would love to know how to get numpy to do a least squares polynomial fit that I can hold to a linear function. I would also be interested in a higher order ($x^2$ or $x^3$) fit to the whole data set.
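For anyone who wants to try it, here is a minimal sketch (not part of the lab, and assuming the ```xdata1``` and ```ydata1``` lists from earlier) using ```np.polyfit```:
```
# Sketch only: a degree-1 (straight line) least squares fit to the lab data.
slope, intercept = np.polyfit(xdata1, ydata1, 1)
print("fitted slope:", slope, "fitted intercept:", intercept)
# A quadratic fit to the whole data set would use degree 2:
# quad_coeffs = np.polyfit(xdata1, ydata1, 2)
```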
#### Standard Deviation
This is a long way out. Each data point has a certain amount of uncertainty due to issues of reproducibility (mostly due to heating of the bungee). Is there a straightforward way to attach a bar of the correct length (either horizontally or vertically) to represent the standard deviation?
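One straightforward option is matplotlib's ```errorbar``` plot. The sketch below is an assumption (not part of the lab); ```yerrors``` is a hypothetical list of standard deviations, one per data point, that you would compute from repeated measurements.
```
# Sketch only: yerrors is a placeholder for per-point standard deviations.
yerrors = [0.2] * len(ydata1)
fige, axe = plt.subplots()
axe.errorbar(xdata1, ydata1, yerr=yerrors, fmt='o', capsize=3)
axe.set(xlabel='stretch (units)', ylabel='force (units)', title='data with error bars')
axe.grid()
fige.set_size_inches(10, 9)
plt.show()
```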
# Data Similarity
Previous experiments have had some strange results, with models occasionally performing abnormally well (or badly) on the out of sample set. To make sure that there are no duplicate samples or abnormally similar studies, I made this notebook
```
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yaml
from plotnine import *
from sklearn.metrics.pairwise import euclidean_distances
from saged import utils, datasets, models
```
## Load the data
```
dataset_config_file = '../../dataset_configs/refinebio_labeled_dataset.yml'
dataset_config_str = """name: "RefineBioMixedDataset"
compendium_path: "../../data/subset_compendium.pkl"
metadata_path: "../../data/aggregated_metadata.json"
label_path: "../../data/sample_classifications.pkl"
"""
dataset_config = yaml.safe_load(dataset_config_str)
dataset_name = dataset_config.pop('name')
MixedDatasetClass = datasets.RefineBioMixedDataset
all_data = MixedDatasetClass.from_config(**dataset_config)
```
## Look for samples that are very similar to each other despite having different IDs
```
sample_names = all_data.get_samples()
assert len(sample_names) == len(set(sample_names))
sample_names[:5]
expression = all_data.get_all_data()
print(len(sample_names))
print(expression.shape)
sample_distance_matrix = euclidean_distances(expression, expression)
# This is unrelated to debugging the data, I'm just curious
gene_distance_matrix = euclidean_distances(expression.T, expression.T)
sample_distance_matrix.shape
sample_distance_matrix
# See if there are any zero distances outside the diagonal
num_zeros = 10234 * 10234 - np.count_nonzero(sample_distance_matrix)
num_zeros
```
Since there are as many zeros as elements in the diagonal, there are no duplicate samples with different IDs (unless noise was added somewhere)
### Get all distances
Because we know there aren't any zeros outside of the diagonal, we can zero out the lower triangle and use the non-zero entries of the upper triangle to visualize the distance distribution.
```
triangle = np.triu(sample_distance_matrix, k=0)
triangle
distances = triangle.flatten()
nonzero_distances = distances[distances != 0]
nonzero_distances.shape
plt.hist(nonzero_distances, bins=20)
```
Distribution looks bimodal, probably due to different platforms having different distances from each other?
```
plt.hist(nonzero_distances[nonzero_distances < 200])
plt.hist(nonzero_distances[nonzero_distances < 100])
```
Looks like there may be some samples that are abnormally close to each other. I wonder whether they're in the same study
## Correspondence between distance and study
```
# There is almost certainly a vectorized way of doing this but oh well
distances = []
first_samples = []
second_samples = []
for row_index in range(sample_distance_matrix.shape[0]):
for col_index in range(sample_distance_matrix.shape[0]):
distance = sample_distance_matrix[row_index, col_index]
if distance == 0:
continue
distances.append(distance)
first_samples.append(sample_names[row_index])
second_samples.append(sample_names[col_index])
distance_df = pd.DataFrame({'distance': distances, 'sample_1': first_samples,
'sample_2': second_samples})
# Free up memory to prevent swapping (probably hopeless if the user has < 32GB)
del(triangle)
del(sample_distance_matrix)
del(distances)
del(first_samples)
del(second_samples)
del(nonzero_distances)
distance_df
sample_to_study = all_data.sample_to_study
del(all_data)
distance_df['study_1'] = distance_df['sample_1'].map(sample_to_study)
distance_df['study_2'] = distance_df['sample_2'].map(sample_to_study)
distance_df['same_study'] = distance_df['study_1'] == distance_df['study_2']
distance_df.head()
print(len(distance_df))
```
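The loop above works, but as its first comment notes, it can be vectorized. A minimal sketch of one alternative (an assumption, not what was actually run; it would replace the loop before the `del` calls) uses `np.nonzero` to pull out every off-diagonal entry at once:
```
# Hypothetical vectorized construction of distance_df; keeps both (i, j) and
# (j, i) orderings, matching the loop's behavior of skipping only zero entries.
row_idx, col_idx = np.nonzero(sample_distance_matrix)
distance_df_vec = pd.DataFrame({
    'distance': sample_distance_matrix[row_idx, col_idx],
    'sample_1': np.array(sample_names)[row_idx],
    'sample_2': np.array(sample_names)[col_idx],
})
```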
For some reason my computer didn't want me to make a figure with 50 million points. We'll work with means instead
```
means_df = distance_df.groupby(['study_1', 'same_study']).mean()
means_df
means_df = means_df.unstack(level='same_study')
means_df = means_df.reset_index()
means_df.head()
# Get rid of the multilevel confusion
means_df.columns = means_df.columns.droplevel()
means_df.columns = ['study_name', 'distance_to_other', 'distance_to_same']
means_df['difference'] = means_df['distance_to_other'] - means_df['distance_to_same']
means_df.head()
plot = ggplot(means_df, aes(x='study_name', y='difference'))
plot += geom_point()
plot += ylab('out of study - in-study mean')
plot
means_df.sort_values(by='difference')
```
These results indicate that most of the data is behaving as expected (the distance between pairs of samples from different studies is greater than the distance between pairs of samples within the same study).
The outliers are mostly bead-chip, which makes sense (though they shouldn't be in the dataset and I'll need to look more closely at that later). The one exception is SRP049820 which is run on an Illumina Genome Analyzer II. Maybe it's due to the old tech?
## With BE Correction
```
%reset -f
# Calling reset because the notebook runs out of memory otherwise
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yaml
from plotnine import *
from sklearn.metrics.pairwise import euclidean_distances
from saged import utils, datasets, models
dataset_config_file = '../../dataset_configs/refinebio_labeled_dataset.yml'
dataset_config_str = """name: "RefineBioMixedDataset"
compendium_path: "../../data/subset_compendium.pkl"
metadata_path: "../../data/aggregated_metadata.json"
label_path: "../../data/sample_classifications.pkl"
"""
dataset_config = yaml.safe_load(dataset_config_str)
dataset_name = dataset_config.pop('name')
MixedDatasetClass = datasets.RefineBioMixedDataset
all_data = MixedDatasetClass.from_config(**dataset_config)
# Correct for batch effects
all_data = datasets.correct_batch_effects(all_data, 'limma')
```
## Look for samples that are very similar to each other despite having different IDs
```
sample_names = all_data.get_samples()
assert len(sample_names) == len(set(sample_names))
sample_names[:5]
expression = all_data.get_all_data()
print(len(sample_names))
print(expression.shape)
sample_distance_matrix = euclidean_distances(expression, expression)
# This is unrelated to debugging the data, I'm just curious
gene_distance_matrix = euclidean_distances(expression.T, expression.T)
sample_distance_matrix.shape
sample_distance_matrix
# See if there are any zero distances outside the diagonal
num_zeros = 10234 * 10234 - np.count_nonzero(sample_distance_matrix)
num_zeros
```
Since there are as many zeros as elements in the diagonal, there are no duplicate samples with different IDs (unless noise was added somewhere)
### Get all distances
Because we know there aren't any zeros outside of the diagonal, we can zero out the lower triangle and use the non-zero entries of the upper triangle to visualize the distance distribution.
```
triangle = np.triu(sample_distance_matrix, k=0)
triangle
distances = triangle.flatten()
nonzero_distances = distances[distances != 0]
nonzero_distances.shape
plt.hist(nonzero_distances, bins=20)
```
Distribution looks bimodal, probably due to different platforms having different distances from each other?
```
plt.hist(nonzero_distances[nonzero_distances < 200])
plt.hist(nonzero_distances[nonzero_distances < 100])
```
Looks like there may be some samples that are abnormally close to each other. I wonder whether they're in the same study
## Correspondence between distance and study
```
# There is almost certainly a vectorized way of doing this but oh well
distances = []
first_samples = []
second_samples = []
for row_index in range(sample_distance_matrix.shape[0]):
for col_index in range(sample_distance_matrix.shape[0]):
distance = sample_distance_matrix[row_index, col_index]
if distance == 0:
continue
distances.append(distance)
first_samples.append(sample_names[row_index])
second_samples.append(sample_names[col_index])
distance_df = pd.DataFrame({'distance': distances, 'sample_1': first_samples,
'sample_2': second_samples})
# Free up memory to prevent swapping (probably hopeless if the user has < 32GB)
del(triangle)
del(sample_distance_matrix)
del(distances)
del(first_samples)
del(second_samples)
del(nonzero_distances)
distance_df
sample_to_study = all_data.sample_to_study
del(all_data)
distance_df['study_1'] = distance_df['sample_1'].map(sample_to_study)
distance_df['study_2'] = distance_df['sample_2'].map(sample_to_study)
distance_df['same_study'] = distance_df['study_1'] == distance_df['study_2']
distance_df.head()
print(len(distance_df))
```
For some reason my computer didn't want me to make a figure with 50 million points. We'll work with means instead
```
means_df = distance_df.groupby(['study_1', 'same_study']).mean()
means_df
means_df = means_df.unstack(level='same_study')
means_df = means_df.reset_index()
means_df.head()
# Get rid of the multilevel confusion
means_df.columns = means_df.columns.droplevel()
means_df.columns = ['study_name', 'distance_to_other', 'distance_to_same']
means_df['difference'] = means_df['distance_to_other'] - means_df['distance_to_same']
means_df.head()
plot = ggplot(means_df, aes(x='study_name', y='difference'))
plot += geom_point()
plot += ylab('out of study - in-study mean')
plot
means_df.sort_values(by='difference')
```
These results indicate that most of the data is behaving as expected (the distance between pairs of samples from different studies is greater than the distance between pairs of samples within the same study).
The outliers are mostly bead-chip, which makes sense (though they shouldn't be in the dataset and I'll need to look more closely at that later). The one exception is SRP049820 which is run on an Illumina Genome Analyzer II. Maybe it's due to the old tech?
```
# LINEAR Regression on Precision table
import pandas as pd
from sklearn import linear_model
import numpy as np
import seaborn as sns
sns.set(color_codes=True)
def sk_linearReg_org(data):
data_set = [[value[0], value[1], value[2], value[3]] for value in data]
Y = [value[4] for value in data]
clf = linear_model.LinearRegression()
clf.fit(data_set, Y)
# application of the model to the data
model = [clf.intercept_+ np.sum(np.array(clf.coef_)*np.array(value)) for value in data_set]
# calculation of the residuals
res = np.array(Y)-np.array(model)
return [clf.intercept_, clf.coef_, model, res]
def sk_linearReg(data_set, Y):
# data_set = [[value[0], value[1], value[2], value[3]] for value in data]
# Y = [value[4] for value in data]
clf = linear_model.LinearRegression()
clf.fit(data_set, Y)
# application of the model to the data
print("intercept=",clf.intercept_,"coef", clf.coef_)
df = data_set.copy()
df = df.multiply(clf.coef_, axis=1)
print("model")
model = clf.intercept_ + df.sum(axis=1)
display(model.head())
#model = [clf.intercept_+ np.sum(np.array(clf.coef_)*np.array(value)) for value in data_set]
# calculation of the residuals
res = Y.values - model.values
return [clf.intercept_, clf.coef_, model, res]
# Using float quality from random forest
# df = pd.read_csv("../metalicities/extended_quality.csv")
df = pd.read_csv("random_forest/float_quality.csv")
df.columns = df.columns.str.strip()
#df.Resolution = df.Resolution.str.replace("k","").astype(float) * 1000
#df.Band = df.Band.str.strip()
#df = df[df.Band =="K"]
print(df.head())
len(df)
# TODO apply the scaling/normalization to columns
data_table = df[["Temp", "logg", "[Fe/H]", "Resolution", "Band", "vsini"]]
expected = df["Quality"].astype(float)
data_table = data_table.astype(np.float)
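# One possible way to address the TODO above (an assumption, not the author's
# choice): standardize each feature column before fitting, e.g. with
# scikit-learn's StandardScaler.
# from sklearn.preprocessing import StandardScaler
# data_table = pd.DataFrame(StandardScaler().fit_transform(data_table),
#                           columns=data_table.columns, index=data_table.index)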
[intercept_, coef_, model, res] = sk_linearReg(data_table, expected)
print(data_table.columns)
print(coef_)
import matplotlib.pyplot as plt
plt.style.use('presentation')
plt.hist(res)
# (the cell output here was an inline PNG of the residual histogram)
s0qUziHbscHnYNbOH4sRfer8u4J57vIzaV9ab7NSYvCC+BDUNPZCIhpOc0DHlty6ZztrTZy4ky79oxOHT13zqRAeCaYWm2weApfNSUNPQg3D3Z5kbugtmJ+LIl1ryWSISsIS32TLsHU5r6Ra7C9WXupGjpiwzvz5wBn0MrZlOigMM5wb4urEPFns3JCKhT63t8WbEeti33347oqOjUVRUBLVajZqaGvz2t7/Fl19+iXvuuWfcOzga+LCu0Lhe4+KGA4XKWKpMccc22j6EAzc95/e+PQeL5lLxtSfOd6DP7IDV7oZ50IF0VWxI4zjw0SWcu9xLhdFoDejpt4Y8D9xUnYHiXcOF8pitQ1eflZWa8eMzLbjY0o/65j40dZpgGnTg/51shtPlhcvtxYmvOzBvRgKW56Xis5o2WO0uUkv6QpMe/QN2iISU1mVz0N64Yni8XhJf7fUCadNiYLJQKSs9XqC1x4z8WYk4f1WHrxp1sNrdrEpfcTES9A/Nvd3hgmHQQQRwTJQIsdFiImyTE6i26f3NlYsz0KEbJMJl1w+WYFq8lMwpTUpCDMxDecB3/WAJLjT1ES9tiVgInZGqqfzxqWswDjrh9nhhc7gREyVCjlqJrj4r+oec70RCAUnleq3LRMLDxCIBLrcaIBRS8ciDNhcrzMxfuBYzhj1RIYVYKPDZt7+sNcBid1OWBoZEnZkmh2nQwYo/Z2qR5kEH3jl+Fd19Fp9CHlyEAvgVxisXZ2DhnEScON8Bi8094vv/KGQAACAASURBVH41Fzo+vVM/6OuVLmAL9EAtP7plCRxON/a9dQ6dfdaAY7l7VTbOXe4lKU7tTg+69INYNEeF5ARZxDTsUcdh7969Gz/60Y9w//33IysrC1lZWSgoKMBbb72F1NRUbNiwISIdjDS8wA6N8R7XWMs6jqXKFD228S4tyYWZ+MLt8SJ9aH/ulsXDca2Pblky4jhoodjUaYLJ4kS8TEIEQDjzwMxDTsfijhY6Zjl3RgKr0tPSeck4WtsGq5164TI1FJUimtqXBaV5r1iYjpN1XdD2DrCScJyq70JXnxV2pwcCAciLd+n8FKgU0cRqkTVdgf/cejM+On2NpNL8bvFMfPFVB7qHXqR6kw1nLnYhW63E8rxUKOOk5PrHf1zIKrP5vW/PgUAgwMy0ePQaqHhsWmjIpCJ091l84ruPn2uH2zNcIEMsGi4dKZdJ8M+v2lm1lOmCHgDtsDc8p9OT4nDnitlIVEST4hRp02RE+CvjosiiiM7MyfaM9uCy1oimThMrTzrJ+T3oILH85ZXX0No9gKXzkqEzDJfHTFJGw8LIq71ycQb6TDZoewehM9r8/t3QYU4Gs2NEYZ2SEE0WXEzEQgE8Hg869Bb0GmwsYR0XIw7omc7EOOCA2epEr8HGmhem89pInLnYjdRpMpy/qmf1ITMljpVdrbHdiO8Wz2Qt1jxeoLHDiBUL0yP2Thl1LvGLFy/iueeeYx0TCAR4/vnnsW3btoh0bqpzI+2/RppAeX3DYazOV5HoAxD6c+TuPdPmfLFIQNIbhjIO7l44HT4UGysN6fpA5vCx/B7LK5thsTlZpS4T5VKYBh247zvz8N/vXQAA5M2ehgtNlPBxuDzIVlMOqs2dJihjo6CIHTZrF+enoWRhGis38+w0OSmeQe+b0r+B2su9+OmzX8Dpol6sAgFw8usuxEZL0IthIRkbI4HL7cXGFbPxu4NfsvaPvQBpz+ZwkxzWXE/g/7ynABea9MRUv6FkNsorm1gOSwCVCIXeFy/OT4PV4SJ7m1y4GiQd0qbtMZM84h16C/Fi1pvsEIt803nSOJweLJiXQMy23N83/cwaWg3kO3qjHS73sDBkPiO90YavG/U+Y6TzkzND4rjOezRMD2yREOjut/n9XoJcCrPVCQ9D4sukYsxKi0f9tX4IAFI4JNCiQBYtxpqC6azc7wBQkJvMyromEg47FXLJnq7AyQtdLBN9bLQYnYzYfgBwON0sh0ca2mEQGP/3fFCB7S/iKy4uDnl5eZDLI1dPd6pCewzTzirfNCjt9iL5/Nzb53Hfd+aFNRf+nK/CJRIe16H+QTL3npmxswBVBOEXdy0ecRxcT/NEuRQdukFsKJkFpVIGbadxxP4GWqjQzlLhPANuf5gOURtKZkMaJcKn1Vry0r/WaSLzffpiN/5jy80AgLc+v4L3TjTB4fSQ/fy/H78KlSIalXWduLVAjTipiMTDMvN2M8tXioQCvH+Ceh7F+an46ooePUMpPKPEVDWnXoMNvQYb9h6shd5ohwACsn8MgCQ44SYnoccggAAVp1tI6BQw5EMgHt7jpcum0vviJ+s68VmNFjKpmFXaMxgZSXGU6drqxLplM8jzio0edoxyub1o6TZDIACiJSIIBIBlSFh6AZzV9EAuk2DB7Gnk9811npKIhxd/17rMZG83US6F3elGbmYCNq6YjWpND/pMNrJIWbk4A1836lBe2QxZNCUuZFIxHM7AzmXMRQlXSDL3lem+MbHYXai/1k/GNlLiFcOAA3893EAEckyUCGsK1T5/Y4GENQBcaulHQvywVpueKIPOZIOT8/xsTg/cXv8WyeL81Ovyjg9qEr969Sp0Oh3y8vJYx9955x1YrVasW7duvPs3KiaDSZw2GVEOHuNvih0N420SVylj0NVnIdrYHcUz8a281LDaYMZFSiWikOePOTazlcodrVLEQBolCkvDHq1Jnbt/DFAvsk+rWzEjJd6n/CL3WqbJ+e5V2ZBFi/FZTRuutBnwr9+eE1Lfmeb5ukY9PqvWhj0OWiO/1NJP+iMSCREXI4ZMKkZDqwE/XpeLtt5B/J8NC1CyMB0NWgN+vI5amA1YnWjtNqPqQif++VUH0YytdmqP1u70oPZKL0yDTgiFAty7bh4qzrSgudNM9k3LK5vR0GqAShGNz2racO6KDmmJMqQmyqBpNSBZGUP69pttS3H+qo6Y4SViIXoNVpLDu9/sIGkqnS4P/mVROqsE5ex0BcpunQtplAgp02RIS4wlpuq4GDF6hrTFuBgxkhQx+MGabHzdqMPVNiO6+q3weAG708MSWkIhIB6qJZmaKINQAMhlUVhdMJ3S8HOTSR7xRIUUFpuLjIeGbs7l9sLt8UIsFsLj8cLp8pB51BltkMdKoDPY8HltGwwDdrJtIBYO5+7Omq6Afshk/8t7ClDXqIfZ4iTzfaGJKjvqhRfXOs1DqVddMA06YBp0QG+ysUpThopYKIBAKIAnzH3qkfB4h/eqXW4vuvoG0Weyh+zA5vZ4WSZ7ajtiWKtnxnx4POzPNK3dA8hMjhv31KRBBXZBQQEeffRR1NbWwmw24+LFi3jllVdw+PBhvPLKK4iKivJ7XX19PXbv3o2NGzeSY/v27YNAIEBFRQVuvvnmMR8LxkQLbNocyXTu2FAyE0tyxqes4WgZT4FNvwT/38lrcLm9mBYvRboqLuw9VLod+qUdqtMYS2APxdQ2dZiIk1OocPfRN5TMwpIc37zHzP7SNZr/eriBtdcJAN8tmon1y2cGvf7AR5eI+VMsEuBSSx8aO0zQm+
xhOZ3RCxWLzQWJWIh1yzJD9geg5/3NI5dxtLaNmNaZDlHUf5Tj0+w0OalLrYyTkhze5ZXNOH2xC8ZBh48go3G5vbDaXejptxKhqjfZSO7n1h5qv/tUPdWO2+OFzemGVCKG3mSHYcCBRLkUs9PkOFLTBp1puLCGxeYimhIzh7csWoJ+sx0lC9Pw4rtf40KTHltuY4SFDTlY0cIrJ1PpsxdO5wAvnJcCiVjo44hGIxJQiUro3N59ZjtsDjfuKJ5Fabd0rHlrP9weLzr7LMTpLjZaDI/HwzIJS6NEfqtiudxedPdbcb5RD4uNXS6TOe96xv76sdp2mK1Olj+B0+3BxhWzUbIwHV19gz5FRrgoYiVBq3TRJCljfEztTKQS0YhOZzKpiOX57U94PvajQtRoegJmNRP4u4hDKOnEohgFTEajjARi1AJbKpXihz/8IQQCAS5fvgyj0YiSkhI88cQTAYU1ACQnJ+Ppp5/GT37yEwBARUUFHA4HNm7ciKNHj8JsNuPq1aujPpaVFfylO9ECm/uSvylLhaihAuvhwq2QFEnGU2A//b81OH6uHTYn9bKx2t3QGazITI4PeSyaln78+R8XcOJ8B3oM/p1fAsF0OhurpYPWVKmMUgbcVqgO+N0DH13CqYtdqNb0YNDmYglriUgAk8WJlYszAl7P/e3QFZiYpsFQF3/0QuXclV64XF5WcQ6uAxr3d/biu1/j+Fft0JvsrJco0yFquD+zcPyrDjR1mKBSROPNz6+gRtODI19q0dozALfHCwsj/3QwmEKV623NvNrtoRaB9Llf3lOAtUszSaUpgKqYRZtd5TIJzlzsJn0YGBJSFWda0Gem4pxprZtp/aCLcQBAtaYbxflpSFRI0dhmxO1FM8ncvVx+we9Lnrt3arY64QUlEE7Vd0FvskGdFIfay734vLYNje1GWO1uMlany+PTrosjsBLio4iAD9fLekjxB0At4j49q2UVz3j/n02IkYrhcvv2gyaQYOTicHkCatdioQBisZC1t+4Pl9sLsTC4Q9mJ8x0koU8g6Jrdo8vzScEcS5fecl007JDisNeuXYu1a9eGdVNmJrSqqioUFRUBoJKwVFVVAcCoj5WWlga9d0KCDGKxKKz+jkSwdHFM6q5Sq+xj5zshixbD6fKgQz+ItUUzQ26DyX8O7bn9zy/XhH0tt0/5WSqfc6Pp00j3+tunGuI1y2TruvlYUeC/2IC/dv77/TqW9yoA7LhrMTJTQ/Of6DLa8c4XjSznmNKiWSH3gSY2Ngp1jXqiZf7xnfP4wW25rPmsu6rDK+V1uObHMYgmWipGZmr8iHP+xpEruLVAjegoESldSLO+aCZsLq9PG8xnXHdVh6Z2I/73k4vkZdqgNaB/wI6ddy1BfpYKlefbWW388Z3zAAClUoa/farxG1crEQtZRTZoXvv4EhEwz7x1jrxM/SWQAYAVN6XjxFBs8TS5lFSxUsRFYf8/hn0enny9mmjHzOxWNCmJsVi+MB0AcKnVgJvmpUIoFuHu23IAAMYBO9EO1yzLxPkrvbjaxn4+zBe/zmTDGU0PFuYMh76p0xTkd+Rye7B90yL8/A/H0G+yostoR1O7ER+ebAq4RxpM/ni81D7us++c99kvDZWSm9Lh8XiRmSqH5poe5y771/IDQfdPEReFvQdriUf7L146CfOgY2hxEFz4BUMoBNH0g43R5fFC6B75PinTZOju9/0NMpFFi2Eaev/EySQYtDh9fjtUn/xLa6EAiI4SQxolYpUG5cK8etmC1LDfK6MhJIE9VrRaLdRqSiuRy+XQaqng9rEcC0b/CA80XMLJ3/zyu+epdHrG4TCDnn4r/lahgZdh/hoJ2qzePfSCfOS5L0aVA5rp+MbNYR3quMLxLP66oRuXhjQcJqkJMbjWbkBv78htaFr68donl3yEtVwmwd8+uRRSsfikpHgYDBZYbcMLh5WLM0LuAxN1YixixEJcaKSKKZStzEKqQsqau1SFFPety2V5GdOhJbSjjXlIiztR0xpwLjUt/Th/uRcqRTRyMpWYnhTLSqhx+HQLls1P8Xluf/mQSmy0e8sSvPz3r9BjsPpoPrcvnwmDwYLeXjNy0uXo7TX75H/+y4f1KM5PJWNdmpuMs0MOPP+2Ic8nSxjAfnExBWAgbe/MhS5kqxWQy6IQL4si1oT/+/+zd+bxcVRXvv/1vqoldUuyZS22sY0XWcYY2wQ5IQPEWGQmy0DAkBBCkmFmMu89yCT4MW8ghoRh5mUEmcBkXiY44MCExQSYrCCbLSSxABtswBaWvGBLbVvW3motvaiX90frlm5VV1VXb5K6Od/Pxx9Lrapb91ZV33PPcs/5xiY88eIRtB1O1MSeX24TFg5xAGajDuEpH/i8cit8/iA2f2YVOrqG0dE9jP7+UZTZTUL+7neOnEPT6nk4/OEQfvWHD2G3GFHqNGOE+eL1OpEQCYSiOHnWj3sffRNOmwlWsxH+8VDCrD6l1V79v38t/HzPT9+AzWwQhEOmZCqsDXqg49QQltWWoc5jR9dZ9Rzealxz6RI89/px4ffhFEU71DDqdTAZ9TAZ9aItbanQ6XTwuMzwlFpx1JsIrLQY9QhN3R+Py4JzQ+pzu14P0fMIhSKafdlscRGLJ342m7Qrfb8/cBpfuPQ8zcerkXUu8Wypq6uD1+tFQ0MD/H6/IICz+WyuwSY+aQYkxl9/tkFzsFNH1zD2d/SKfEenzo2iu3c07eheviRcplWftEZIsxSU0sT6VlPC17NA4/hXLCyHQcbRtO2L63BWIV2hlEPHB0Rj97gSeYa19oHnV386iYGRgGqkuTQiHpgWXrzPTW4LCF9sgfV50B9Ez+A4LGbxpGE1G4QIZ3YuL3DlcmUvml+CaDSGp185Co/LKqT7ZH2QRpPvP9IrVJkaGQ8LP7+0z4um1fMwEYzg3ePJAr3EZsSozH5bKeFIDOuWVeLKjfWirFx73zuLilIrPrtpEV54swve/nE4rEYhwMlmMWEyEkYcwKA/BINeL1qQJvpgwp593dh7+BxGxkI4cWZE0CKl/lN+Fww/Duafl4N/luHJmCbfrRpq241SUVlmR2gyioPH+uHtHcWgRBvU0vai+U44rCY81tqhyZzOC1Alaisd6OodS2mW5tEhYVoPTYZEWi0faZ+q8hcASAt6KaUplaOqzCbEHTmsJgyPym9Hk2MiGMlJVb1UzIjAbmpqEjTjQ4cOobm5GX6/P+PP5iohha0ORoNO83Yipl1azUZUlVkFDeOCpR7F2rVybXT3juLgsQHRxJPu1gPpdp5ULyTbsiMlHI1hZDysqZqNELA3VdeXN4W+3dGXshoUa+OFfd2isS+pccFTak2roo50a8zBo/244VPnJ/lwmcBtXOJR3H/Ls6lxvnAey0HN9jfzfbZbjUlWu3FuYmDwqSe3XFyPX/5heq+ozWwA4olgpHAkhon+8aTzO7qHRYsRYCrNZX05fvmnhGZ6+bpavHrgNLx945gITuK8BS6sXuzGwaP9sJoNCIajGA1EYDLqEI3GUyateO71E6ifV4INK+cJqVd1JgN27
T4Db/+4IDz4aGQ+iUUkGsfZwQm0PH1Q8EPe/chbGBoNClm7UlHqtGDJAhcWVDjQ2e3D+uWVmIzG8N7UYiTf2C1GhCbTj7YGEtaBodGg8H2Tc2EwYW2cWkAbuCpdAFBiNwFxoKPbp9n3nUpYA8CpXnmlRQlpClG+K7mNJ1dn5UI3zlsQwbvHBoW64ukwE/nE8yKw29vb4fV60d7ejoaGBjQ3N6OlpSXJJ53NZ3MJNmEr7U2MRONCvlo1gSnViBlSs2gqmAbHT+SXXVijaYXKI03eofRC7tnXLSusWRm9WAwIx2L4H//2Oj63abHiwoMtNPiFD/+FffHNLtjMhpQLlxULyxGKQTDrOqxGvNM5gKU16ZkuDx4bgJ8TEgaDXrV61MBIAHaLATqdTnHbS8Oicgz6Q9h76JwgpJl2zKohAQlzulzpR2B6Yciuu7y+TBC47xzpw9qlHkEDdjnMON0/jgg3C46Mh5LO5/c9/3bvSbxx+BysFiMGRgLw9o3BaTXBPxEWzOwVpTasqC/H2YFxbN5QJySuYCbFVExG43jqlWO44YplwoLlG19Yi15fIK3AKV4wnxkcT9Kw1AiGovi7v2wEAPy//z6Ey9fV4k+HzooKVOQK3lLAYFvPMiFxj7R1kj176Z7m8cAkRlWitmeKdLRgNeTiHLSi1wPHT/tww6fOTyrKogWPy4LWN7s0ueuyIeN62HOZma6Hfcd/tmEiOKlaqeZrn16Bj08FyMjR0TWMp14+KrtS1gFYVleGf0hRR1lqHnXZTUKVocvW1WJBhUOkYaqNi5kV+eQdK+rLFV/IR3/7Af405XtU4i8/sRif2aSsIbOkHqMTIZwdTBZWqxe78a2ta1WvwQTpztYOjIwmskRNcAuguionbrhiWcqFk9KzYLWSea1YWk5QbeJgRSjY4mnR/BLB9bF2aQU6vcOK5ljG0gUuGAx64boelwWb19fhyo31uPXBP0Cv02H1ee4kCwtj7bIKBIIRUS3milIbbrhimWxpUDmuurgO1162DN9/4gAGRgIYHg1pTgXJmFdug9lk0HQ9OeaX2wRLjNWkRzBN87TTasRfNC3ClRvrExXC/EGEJKUXM0WHxH0NhqOIxbUJk0zGQExTV+nQXIREytqlHhz1JsrFZiIRv/bpFbCYjTmpia3mw9Yr/oVISUfXMO5+dB/6fepl5dYu9WjSblcvdst+HkfiC8/7OuVYsbAcN155vvD7WGASkWgc/olJ7D/SK6rhm4qDxwYwyWm637xuLdbIRJl3dA1jz77ulMJ6XrkNHyj0v6NrGN9/4gA6vT50en2ywtps1OPE2dQZvp56+Sj+/fn30c9MwBKBtXl9bUphDSRiDuRg9Yg7vT54+8dl3SBxTG0bkTn/vAUufJOrtrW42oXPblqEz25ahEAoAqtZPdBl5aJylLmsoudsMhqw91AP7n50H8YCEfgnJjE4EkKpY3rrZanDDKNBB6Neh4lgRHQ+q0j01MtHNcUIOKxG7D10Dnc/um8qEYt2YW3hMoVVVziwenFm/r7aSgdqq5w4b4ELDYvKNWUVk2I2GfDKO6fxrR/9CZ1eH/qnCnQk9dmU/jQZRyIbGbsv0tujY/+4UA0S1pmzaL4TvcMBmAwaNlnL8O7xQUVXyrzy1Fu1fvWnU2nNr5kyIz7sYmV/R68m7cDbN476ecqrJuYr5RMaSGFmzNR96oPFpEdoMiaaRLUGgEi1dEbLkwfwt59bnXTsT359WHYLl5RINK74QksDn6ToMBV8ElH2owtBfyorbJNBN5V8RdnSwczEnlIrHNZEggpmsnPZTejsHha5GsKRKOwWY5J5U8k0e/jkEOxWk2DCPjs4gQ0rqrC/oxfHz4ykNAf3DIzj0gsWYH9HH5pWz8O+I31C5Cw/VX3Y4xeij416ncj/e6rHj6dfOYam1fNhNukFv/vkVBYtKQZdol40IDbtKsVsqBGaylvtspvR0e3Dh2dG0jZlGg06zPc4YDbq4bKbcKRrOG3tXgcIFbtUr6XXad5nrBUj70suOvtmbkjnndDrgJ7BiYwWbVroHQ7AqNeJ3EpSUu0fzxUksDNkz75uYftJKtYscePzn5AP+e/oGsauV4+nfDlXLnKn1Ay7e0fx+4NnZCcYfsJWY8XC8kSCEInA1ut1out3dA3jh8++pzlKNhiexOXramX7DQC/e+OU4peUD5ZR8qOnEvqJMegVrQQsSI+N23g2cU2LSS9IK7fLijVLK/DiW91oWj0fQBxvtvdqypzEiETj6OweRv2UCfuXf/wQP/n1YfgnJjWZ4kYnJrGivhx/OnQW3r5xkV+SP91o0IHFM0knmnAkhiNdwyixmxIpM6fo901vmSl3mjE8tf1pxcJyBMJRuEssOHZ62sqhRZCZTfqkd6R3aAKBUDStLT/8uJw2E8xGHd764FzGEdZahYHaJJ0pqfJjzzb895BfrKmRTnUsLaTTVDyuPXmLVlghFiA5KE6OdcsrZz+XeKGS70xne/Z147nXTwh7QlMx5A9i0XxXUhacPfu68fiezpQvg9Ggg8mgV03L+cjvjuDtzl7ZMnZA4qUeGAmKsnwpZTr7xWvHYTLoRdHQwXAUB472o9ptT6SsfOWYalIBKdFYHF/782T/N6v3fPKcX/ELzz5n5Q3XnT+dGpTP0LXjN+0IhaOKX95oLI7R8XBSprNHfncE54YmRDWU2TV5jdM3Fkbv4Di8fWMYGQvj2OkRUR5jrdjMRpwbDqDabcfPXzqKCRV3ipR4HOjuHcXp/jHodTqMTWm7JZJyhGyxoaSxx5GY5MxGvXC/4lO+VpvZAKvZCLfLgvPrymAyGdC42I19HX3Cws9q0msSZnLX1+t12LiiCsfPKCeZUSIWB0LhKLx94zkPDPuoIi1laZrKUw5oF5zF8CgsJn3CauMwC8l8AG0LkQFfEIurk+f4TMg60xkxTUfXMP77jx+mFdko1U4ZV26sx+GTgzh8Ut03rUMiklepP3ImbDlSbTtgmuaR7mFZky6/l3wykp45NBaDKEo8nX4zNq6swqJqcdEMvtzdyR5/yudy45blGB0PC5o934eWpw5i0TwnhsdCimb+Pl9wyhScWWCSXpcwxQ6NhvBvz7ybUYTs6YGEsBoZnxSSPcjtf9ZiIgxNRrHAYxPiBuJIuE8C4SgsJj2+9/WLsb+jD8++dlxUkCIbf2sgFMWL+1InQFKiGISDFkxGnVAsRY1soqOBxPMosZswHphMpMLNwLScSR/0uoT1TMsYldBqAdACW7ims3+cMVOx26Rha4DPS73zxSNpR5Hy2ilfAP4nv25Hpzd1IBVb4cnlwK4os+HwyQGcGVDPAOS0GTE2MSnSTqUadstTB7Gvo09Wc9HrEpqN3WLEvz/3vmyKylTYLUZccVEdKspsqCizoWdgLC0t69S5UfzZ2hpUlNmSqmi90X4O4UhctRKQXg+EQtGE6bvbN+WmmK6oZbMYUeq0oJJLoCAl3VzNUvizDQa9qL/pmNYZVrMhKxOrQa/DyJj8+xyNxfG7N07h/eMDOYmcLgRS5ameSdLZopYNC6uciXzv
KXYn5BqXI5EDPZvbne2jWuBJzo2fCasXufHnU7nls4U07Bwy5Nee/Yan2m0Tadn/8d/vq0aWS5HTjplG/HZH6n2D1122FBaz/ONmC5F+laC3WDyhwY4HJzNKKgAkTK4rFpYL/d69Pz0tKzK1d3fT6vmon1ciCv7SMsnGYomgPKZ53v3IWxj0B4Xtb76xcFJZw3wizZOdySI9EIqm3Psth14HGDlzOM/SWpeQc9tuMRa1sBb5a/VAnuKW8oYW/6oael1Co+TrfM8UWuNq8oXJoMOQP1GSdGxiMuOFmtVsgGGG7h8JbI3s2deNF9/qyjjIpbrCCSAhHNMV1kBCuFx32VIhaQgTskP+oKYX7dnfn0iK8j50fAC+qUCjgIYkDs0X10+lMEyr6wKTUyfufEF9caDG5vW12HvoHA4eG4Cn1Aq7xYDQZFRzn0rsJiGYZNq8rf1Z5NIEx8i2uYlQFAp1NhSJxYG45MVxWI2oqXTgVM8oSuymnO1JnsvwSVIymbClaXhnEh0SuxmUrEGa2tAh4wV4oTMZjWMyGs/KvWMyJPKm52L/tRZIYGvg0PEBvHLgtKbtS0qwLF0A0hbWALB+eaVIWMtlRFNDTiA/uacDI/6gbKEIOVg2q0wJhqO45V9fzVjgA8DPXuwQJladN31h5xsLCcFYmWxLmqsBvpnIDGld4Xgc+IcvXYRb/vU16HVApNDUzQwosZvxybULcOLMCNpPqceSyDFbwhpIvPvZCGsg8zzmRILJaByTE5MzsgcboExnKenoGsYDu97N+ov58cZqjAXCQsrIdLFZDPhfV68BADz8m/aMTLeVZVZ89aqVAKA5o1WuydaElw/YvvVCIxcpNK1mPfQ6vbCPvFDvRSZ4XBbYLUYsrS3F7989S1HnRFZcduECfHnLiqzbmfVqXYUKi2TOVlgbDTp0eoeFIhKZEAhF0d07iis31gvlAdPlM1xQRLpR3rlirglrIPd7OGeCXO17DYZjAKbHXyj3wigpZJEJg/4QBhHKOJ0lQfAcP+MXCvrkCxLYwnNJYgAAIABJREFUKnT3juLY6cxrzDIi0XhWwhpIbGnq6B7G068eT32wAs++/iEsJj18o6E5KTgJ7cyVSObZYq4nHyE+eqRKe5wLKJe4CvXzSubMxLi/o09UHzsTwpNR9PuCJKwJgiByTGd39spdKkjDVoCVjJwrxOPIestRMIOEAARBEIQ6DYvdsmmPcw1p2GqQIkoQBJEx+ikJo0MiwDEbjFOVuExGHUwGXUaJhvi2DDmQfkaDDlazATZLbkprprxe3q9QwOSr+gtBEESxYjTooNfp4LAasaS2DO4SC84NTWBTYzV+98YprKgvh7vEoikeR6dL7Fxwl1ixoNKJ8GQUmxoTNRX27OvGibPp5aN32oxw2c2wWoxwl1hw4Gh/Rm5Pk1EHh9WEpVPjW1JTmvqkHEACmyAIgsgJdqsRqxa5RUKM1zzZzx1dw6KKWDw6ABazHlazEWaTAdd8cknS+QCw69VjKftjNeuxoMIplL/880sWCX8rsZlw1OtLKznQ1suX4kjXsLBokPYp35DAVqB3OP1c2QRBELmA32PPLL9MEUxVm1m2Pe5no8aiIkrt6HSJgkZAItNbTaUT/vEw7FajIBBTCbEVC8vh5LIOMsxGPWqrnNgylSRKqa2OrmFMqOQA1+uAZbVlWLGwHAsqHIr9Wbe8UqgHr9ZWeYkFFaU2BMNRfPPaC1SPzycksBWYV26f7S4QRYLZqP/IulfYfnGWOnWu7LrIJUwYGg06WMwGweQKAE6bCfPddthsJpw8M4L5bjuOnxnB2f4x2ZSYBn2iDafNhLVTQUznhhLKw3x3Yk4a8gdx4Fi/YnEQpqG6HBZYzQasqC8X2pnvtsNqNuC3b5xKWVyEjctiNsDlMGO+2y70AQCW1JTi059Yghf+eCIjLdOg16PaY8fwaBCRaByVZTbYLEZs2VivSeDfedN6oZYAT8NiN2oqHLIaPs+efd0phXVtlRPuEotIo55NCkZgt7S0YNOmTWhvb8ctt9yS9+vVz1PONpMKq9mA8hILejKoaAUkihAYDHpEo7GCTB2o1035nswJP9HpDBNT6HUAdDNXtSjXmI16mE0Jk9wxry+jGMZsSyfmE9Y3QeviooD0eh3MJj1WLHQjPBkVJvrX3z2jmJyFCXWdTod55TZYLUYsnZp0mdA6OzCOAZU89Ho9YDEZsay2FL6xhPZW5rQI1z9+ZgQfavR7GvSJd9hpM8E6lVZ4RX250BfGfLdd8NGeODOCJTWl2LCiCvs7+gRhUVlZImRg3N/Rh6dePopIbBKxWBzxOGA26VHqtGDt0oqUggYAtj/yVtL3ymE1wsEJetYPKR1dw3jpba9sauNEIJYOLrsJS2rLsGFFlWhMcmRqEua16Eza29/RB6fNiNBkDNFoDAaDDhWlCaF//RXLUp5/5cZ6+MZCaJWUejXoE+9MdYUTVrMBf/f51QotzDwFIbBbW1tRVlaGpqYm7N27F62trWhubs7rNVcsLIfVbEi5FcpsSvhaypxm0cSwpKYUv3jtuOrkwmM06GAxGeAptYr8LD/+5WHNfTbodXDYTEJfxgKTmicnKXo9YDMbEYvHU+YsN+ghTDruEiusU6tkxn/+6rCmtI8mgw5xAHarCR9bNQ9A4j7+5FeHM9LMrFN+sHAkhok0Klkx2IKhotSGIX9QU7IOs1EvBNvwk11FqRVth88pnscL5sSCJ6FpMX8gALz8jleTVhTn/uc1tr4MijwwYWw2ibUspq2x/wGxJrikplR2oucFL5DweQ6PhlBeYsGFy6sQmDJzKgmtf3r8bfjGQojF4sI7YTHpUVPphNNmEjQhtUn/tof+iFEZv6VW36kacv5auWPYvQEg+lnrdea77YjG4hgYCSAWi8NmMcLlsOBzH1+sSTs9r9qVlDvdbNSjosyK1Ys9ov7ky0ebbbs1lU40ra4W3jUAaQd/fdjjFxVw0esBl8OC6y5fBpfLCn+G1RnzRUEI7La2NjQ1NQEAGhsb0dbWpiqwy8vtMBoNWV934fwSnDzrF8yZOiRKEtbNc8LtsgIArtiQEEwfv6Am6fxXD5zG4EhQVkNyOc2oKLViYCSIy9bVYsUiNzpODeGvPtcoOi6VwDYZ9Sh3WVA/rwQ1lU6sWOQW9eUz3/6VprFWuRN1toPhqKg/Kxa58dNfHsboRBiTkRh0+oQpy2kz4dILa3Cmfww1lYlKZCsWuYX2+D7sfOGDqRSYyVjNBrhdVkRiMXz1Lxpkz//Vnz7EWYV63zoktDmr2YB5noTgkD6bjlND+NUf1PfU63SJCau0JHEvh/xBNC6pEMb0p3fPovucH71DE4hEY7CYDHDYTFi0wAUAwj3g7wM/Bt/EJN4/MYjwZBSxWBzRWByWqbFbLQY0LqkQ3Uu5dt5sP4exwHQZQL0uYYlhz3/IHxTGLtfOD558R9F3aTLqYbMmpoOKUivcLiu6e0dxyepq0bsg956nw31/9/Gszr/2U+cLfek4NQRA/n6rsaDCgZ6BcUwEI4ghDovJiPISCzasmqf
4DucCPkf0/7p+XVZtXXHxQgAQ7gX7+dOfWKJ2mkCZy4blU2vq0YkwNkwtkAEkzUGpUMt9nU8+nYPrrlzsgctpEb4v7Duo9T7ONAUhsL1eL+rq6gAALpcLXq96HeXhHAWMXb6uFntNPVhcU4oFbjv2HurBfLdd1twiV3Dk8nW1sJgM8I2FMBGMYEGFQzDT3fPVjUnHL1/gSmqnfp5T0A5HxsMwTNkNq8ptgm9K2h++DZfDhLHAJOJxcaEIvQ74zCfOE/xqcmNaPiWMLlpembY2wPfh0qmJ79zQBHxjIZQ5LfCNhbCivlyxLf78z338POHenxuawMkePxZXu4S/y2lVvAly+QIX9r53FvYpgdQ3HIDJqMdkJIbz68pEJk2lMfn9Qaw5z52kGamZ3vgxlNlN+PKW5cJ5vCaQStNg7SyqduGqTYtx8EivyASrxfwHACsXugVt+MDRftitRuEdApLfIx72LuSysI4U/pmp9YP1hf3P0Nq3y9fVymq4au9gtmgZWzrI3QO5+UOJNee5hfHypnsgvXHnelwzzee4+go8/f2jszY2tQVQQVTr2r59O5qamtDc3IzW1la0tbXhe9/7nuLxub7JhfxS7u/oE36WCopPf2JJwY4rFYX8zNQo1nEBNLZCpFjHBcze2Aq+WldTU5OgVR86dCjv/utiQotPjSAIgpj7FERq0ubmZvh8PrS1tQGA4M8mCIIgiI8KBaFhA8C2bdsAkLAmCIIgPpoUhIZNEARBEB91SGATBEEQRAFAApsgCIIgCgAS2ARBEARRAJDAJgiCIIgCgAQ2QRAEQRQAJLAJgiAIogAggU0QBEEQBQAJbIIgCIIoAEhgEwRBEEQBQAKbIAiCIAoAEtgEQRAEUQCQwCYIgiCIAoAENkEQBEEUALp4PB6f7U4QBEEQBKEOadgEQRAEUQCQwCYIgiCIAoAENkEQBEEUACSwCYIgCKIAIIFNEARBEAUACWyCIAiCKABIYBMEQRBEAUACmyAIgiAKAMM999xzz2x3Yq7S0tICnU6H1tZWXHTRRbPdHc20t7fjjjvuwOc//3nhM7mxaP1srnHrrbdi+/btGBkZwaZNmwAUz/haWlrw05/+FP39/WmPY66Pzev14jvf+Q6uuuoqAMUzrh07duC+++7Drl27cNVVV8FisRTN2Hbt2oXTp0+jqqqqaMa1a9cubN++Ha2trfjRj36EqqoqLF26tCDGRhq2Aq2trSgrK0NTUxN8Ph9aW1tnu0uaaWhowOHDh4Xf5cai9bO5RltbGx566CHs378fzzzzDLxeb9GMz+v1Ytu2bdi5cydefPFFAMX17Pg+FdO4fD4fnn/+eTz//PNwuVxFM7bt27dj9erVaG5uLqpxNTU14fnnn8fOnTuxZcsWNDU1FczYSGAr0NbWhrq6OgBAY2Mj2traZrlH6VFaWir8LDcWrZ/NNZqamoSfV69ejbq6uqIZH+tbe3s7tm7dCqB4nl1rayuam5uF34tlXF6vFx988AGWL18uTN7FMLa2tjZ4vV54vV6hX8UwLmD6ewYAo6OjcLlcBTM244xerYDwer3Cg3G5XPB6vbPco8xRGovWz+Yifr8fq1atAlBc4/N6vfjJT36CDz74AFu3bi2KsbW3t6OhoUG0iCyGcQGJvu3cuRPt7e24+eab0dTUVBRj27t3L1atWoWmpibcdttt8Pv9RTEuHvZeAoXzPpKGrUBdXZ3wMPx+v2hVVmjIjUXrZ3OVXbt2Ydu2bQCKa3x1dXV46KGHsGrVKrS3txfF2O6//37s2LEDd911F9544w3s2rWrKMbF09DQgKuuukqY+At9bKOjo2hsbITL5cLWrVsFzbLQx8XzwgsvCPEUhTI20rAVYCtlADh06JDInFdoyI2FrZhTfTYXaWtrE0zGfr+/6MYHQJgcimFsO3fuBJB4VnfddRe2bt2K1tbWgh+XHA0NDUXxzBoaGkR9kgqrQh0Xj9frhcvlAlA4cyRp2Ao0NzfD5/MJPgredzrXaW9vh9frRXt7OwD5sWj9bK7R2tqK7du347bbbsPVV1+Ntra2ohkfG1trays2bdoEl8tVNGOTUizj2rFjB7Zv3y5aRBbD2NhYWltbcejQIdxyyy1FMS6G1+tFY2Oj8HuhjI3qYRMEQRBEAUAaNkEQBEEUACSwCYIgCKIAIIFNEARBEAUACWyCIAiCKABIYBMEQRBEAUACmyAIgiAKABLYBEEQBFEAkMAmCIIgiAKABDZBEARBFAAksAmCIAiiACCBTRAEQRAFAAlsgiAIgigASGATBEEQRAFQlPWw+/tHc9peebkdw8MTOW1zLlCs4wKKd2zFOi6AxlaIFOu4gNkbW2VlieLfSMPWgNFomO0u5IViHRdQvGMr1nEBNLZCpFjHBczNsZHAJgiCIIgCgAQ2QRAEQRQAJLAJgiCIvBAMR3Di7AiC4Uhez/moUJRBZwRBEMTsEAxHcGZgHB6XFS1PHUTP4ASqPXZ85yvrYTWri5xgOIJ7H3s7rXPU+lBT4cjo/LlK8YyEIAiCmFV4getxWTDoDwEAegYncGZgHEsWlKqef2ZgHD2DE2mdo9aHbIT+XIRM4gRBEERO4AXuoD8ET6kVAFDtsaOmwpHy/JoKB6o9dtE56ZrI5YR+sVAcyw6CIAhi1mECl2m32264EIP+oGbTtNVsxHe+sl4wZwNIW1uW9kHLQkFKMBxBZ9cQ7EbdnNLO505PCIIgiIJGKnCtZiPKnJa022Bm8BNnR9I2kcsJ/RNnRzQvGuaySX1u9IIgCIIoCniBmy2ZasusD5kI31z40fMFCWyCIAhiTiKnsadDJsI3Fyb1fEECmyAIgpizZKOxZyJ82SJhIhInHzZBEARBzASZauhWsxF1NSU5LySVLSSwCYIgiKIllz712Yb2YRMEQRCEAnMpVSpp2ARBEAQhw1zb4kUaNkEQBEHIMNeyppHAJgiCIAgZ5FKlziZkEicIgiAIGbLdB55rSMMmCIIgCpp8BoaxKPPZFtYAadgEQRBEATPXAsPyCWnYBEEQRMEy1wLD8kneBPatt96KDRs2oKWlRfispaUFbW1t2LFjR04+IwiCIGYfLSbpfJmtcxUYNpf2WyuRF7tBW1sbHnroIQDAhg0bcP3116O9vR1lZWVoamrC3r170draCgAZf9bc3JyPrhMEQRBpoMUkneqYYDiScWBXOoFhSteR699cJC8Cu6mpSfh59erVqKurw44dO4TPGxsb0dbWJjo23c/UBHZ5uR1GoyGnY6qsLMlpe3OFYh0XULxjK9ZxATS2QmQiEheZpCcicdTViMfa2TWkeEwgFMH2H76O031jqK1y4gff/CRslvRFU11Nuerf1a4j1z9g7j2zvHrm/X4/Vq1aBQDwer2oq6sDALhcLni9XgDI6jMlhocncjqOysq5lwQ+FxTruIDiHVuxjgugsRUilZUlsBt1oopYdqMuaaxqx5w4O4LTfWMAgNN9Y3iv41xecn+rXUeufwBm5ZmpLRLyKrB37dqFbdu2AUgIXK/Xi4aGBvj9fkEAZ/MZQRAEMbtoMUmrHZ
NOCcxsTOdq15lr+62VyFuv2trasHXrVgAJTbupqUnQjA8dOoTm5mb4/f6MPyMIgiDmBloqYikdo1VYZrt9K9V1CqGqV16ixFtbW7F9+3bcdtttuPrqqwWfs8/nE/mks/mMIAiCKA60JCfJxfatuZQEJRN08Xg8PtudyDW59jsUs/+pGMcFFO/YinVcAI2tEJnJceU7QYrU3D5bz2zWfNgEQRAEkQvy6WeWWwwEQol92XPJpz03ekEQBEEQKciXn1lqbj/Z48c///wATveNJWnz2QS+ZQsJbIIgCOIjjTSCHICwBYz5y5csKJ31vOUksAmCIIick4kmOlvaq9TcDgC1VU5Bw2afyQW+zWRkOQlsgiAIIicwgetxWdHy1MG0NNFcaa+ZCn2puf0H3/wk3us4J2onnT3j+YAENkEQRB7gBUe258+VoCc1eIHrcVkw6A8B0K6J5kJ7zYXQZ/f9ApdNuD7/LGYzwcrcfwsIgiAKDKngePDbl2V1fiHUeOYF7qA/BE+pFYMjQc2aqJz2mu6iJVuhz9/32ion/vHGdQCQ9CxmK8HK3H4DCIIgChCp4Og+54fbbsr4/Jn2lWaCVOBuu+FCDPqDisJWKozl/MjpVviS9sHjsqa1NYu/76f7xoTkLHPlWZDAJgiCyDFSwVE/34UxfyDj89MVPLOB3D7pMqdF9lglCwLvRz5xdkRRUKqdz/qQiR+dv++1VU5h4TCbfmueufnkCYIgCgypxscLL5vFiLE02spW8MwWWvdJa7EgqAV4qZ3P+qAm8NX6z+77BSvmC4usuVIYZG4+dYIgiAJCSePLxnSajeBR6uNMCh2162mJts62wlemEd3svvOLrLlSGIQENkEQRJak0hizSXOZi61EMxHEJo2KV7uekjCW82trrfAVDEdwsscPAFhc7cpZKtO5FK1PApsgCCJL1IRqMBzB9h++LpvmUgu5EDz5DmKTLghuvPL8lNeTCuN0FxX8+cFwBN/92X70DiVM2PPcNtx984a0NWPpomOuReurXvn++++HTqdT/Pu3v/3tnHeIIAii0FATqmcGxmXTXKbbfjYCNt8JP6QLAiD9QC25fN5mk0HTIuXMwLggrAGgdyiAfUf6sHFlVVpZ1qRb8eZatL7qSDZt2jRT/SAIgiholIRqTYVDNs1lvlELgsu1lihdECyudqV9Pb6NeW4bHt/did6hgCbN1uOywu2yYGgqWYtBr8PPXuzAb/aexJ03rZeNVpfeH7mteLOd2UxKxvWwf/GLX+Daa6/NdX9yAtXD1kaxjgso3rEV67iA4h6b02VLSnOZT2bKlMs/s1z4elkb4ckoWp56V/j8zpsuUtRs+bG6S8zYvL4eu147LvzdU2rFvV/fmLSHW3p/ACRp2GP+wIz7sLOuh/2LX/wCDz/8MPx+P0pLSzEyMoLa2to5K7AJgiDmEjbLzEYZz6Qplxdo2V6DWSmC4YhmzZYf69BoGPXznaLUqIMjwaTxK5nf+WQvLEp8rkSIAxoF9tNPP43nn38eLS0t2LZtG7q7u9Ha2prvvhFE0TCXIk2JwifV+5TKlJur9zEQyl3ubr4v6Zjw5czxd960Hvf91zuKqVGzMb/PJpp6NTo6ipKSEmzatAlvvvkmNm/ejB/84Af57htBFAVzLdKUKGy0vE9qAi+X72P3Ob9mTV5OMKv1RatmKzdWq9mIe7++UVHg8+fw5ne5jGpzaaGt13LQypUrcffdd2PVqlX48Y9/jEceeQQZur4J4iOHnHmSIDJF6/vEBJ5U0OTyfayf70K1xw4AqqZrJpjve/wd3PvY2wiGIznti9xYpZ8Fw4m98Oza7O+Lq+XHwKwH0j7z7UjbzDealgwPPvggvF4v6urqcPvtt6OtrQ3f/e538903gigK5lqkKVHYZPs+5aIqFsNm0Wa6VvKpS03T4ckoguFIXpK6qGnycmOQsx7UVDiEdua5bQAwo6Z0za3X1dUBAJqamtDU1JS3DhFEsZHvLTVEYZGtmTXb9yndqlhqY3C6bJpM10qLDNaXkz1+PL67Ey1PvavYB6X7puV+nuxRN91bzUbUVDhE9yQUjmKe2yYIZPZ31g6/73um9mhretKbN28WEqj4fD6Mjo7iuuuuIy2bIDQylyJNidkjV/7jXOUpB9SrYsnBj2FBhQNf2rxMSAWqdj2lRYbVbITZZBAEoFwflO6blvsZDEfw+O5O4fd5bltSJjq2YOgdCog053luG7bdsFYYn9QiwI6bKcuZpjflpZdeEv3e1taG3bt356VDBEEQxUqq7VazEeTkcVnhKbUKEdWpSnnyYzg7MK6qFfOoLTJSmfmV7puW7WvSLGg3bVkuG/TGkGZMM5sMiuZz1v5MPa+MrtDU1IQHHngg130hCIKYk+RKkKbKOT7TuwmC4QhanjqIwZEgPC4Lbr1mTcpSnvwYGJmahPn7qmbmV7pvmVTtWlztEv7GC3yGVHOWLmCkC4+ZtJxpehukOcW7u7tRWkrmPYKYSYLhCDq7hmA36sgPPoOoCdJ0BXmqnOO8tphuLuxM4K856A+h0+tT1FilwvVkjx9PvnwMZ/rHU5qEtW7pUtpOpXTftPjzeT+5FKmJ+6Yty7G42oVgOIqTfWNYUGYTLWD4xCqz8R3UdMXGxkbR701NTbjkkkvy0iGCIJKhvdyZk612rJQVy+OyKmqjatdUyznOhIfBkMiFvXtfd16ftVT7XF5XBoNBh2g0DoNBB4/LKoxH+v6tXOjGv/39n6VMuar07iqZs5WOV7pvWv35P99zVLZNuTKd7LnyGdN6BidEyVhm4zuoerWxsUSFGbkiIGNjY3A6nfnpFUEQIuZa1aCZJBuBm4uFjlJWLOlkriZwtI7vO19Zj31H+vCzFzuS2tVyfrYR42cGxhGNJnJsRKNxDPqDKHNaFN8/LSlXtWzp4jV0ueP5CO5cLLr4eyqNEOcjygf9IRj0OkRjcej1iTSncm3MFKojv/XWW6HT6eDz+fDBBx+grq4O8Xgcp0+fxtatW3HPPffMUDcJ4qPNR3Uvd7YCNxeTv1JWrEF/SBSspSZw6mrKNY9v48oq7N7XrelZ52JBwmuo0veM+W89LmvG71+qLV3SZyHXh1wuutRiB+a5bYhxScH4CmCxGGSf90yiOupHH30UAPC1r30Nzz33HFatWgUA8Hq9JKwJYgZhk9tEJF5QPuxMtT++alM2loVcTf5KRSl4nyaAtIWbkuYnZ6bV4vdWuj9anwMvRKUm/0z9t6m2dEn7K6f1Z2td0ho7wEeIA8CXrzwfz7x2orB82KdPnxaENZBIouL1ehWPb29vx/3334+dO3cKn7W0tGDTpk1ob2/HLbfckvVnBPFRw2o2oq5m7pegZMJBzceb6nxe45Emr0inD9Lo42wnf7mJv8xpSdJ0tU7sWrKOqWnRqTTHTJ4DE6LS/dmD/mDGJmCpYE61gFDT+pXuUaoFiZbYAT5CvLbKieX15bLPe7bQJLBXrFiBb37zm9i6dSuARPWulStXKh7f0NCAw4cPC7+3trairKwMTU1N2Lt3r1DpK9PPmpubMxstQ
RB5hRcuSj7eVEg1nm03rIXZZBBpsekGOalN/kptpBM0Jl0IaBVuWrKOpfK/ykVA5+I51FQ4hMWSNNmIlHQsKema8VPdo203XJi0IAG0749W2lt9wYr5GBgYFX2e6t3LN5qu+tBDD+GnP/0pdu3aBQD4+Mc/nrIWNr/tq62tTUhn2tjYiLa2NgDI+DMS2AQxN5FuE8rE5ye3b1ZrVitpH5QEnNpknolfWC0ndqrteKmyjmlZZLAIaLY1iZ0PZP4ctJLu/UrXyiFdDEjv0fsnBpOi+OUiwpXaA5IXYezn2cwbLofqFR944AF8+9vfxjPPPAOfz4fa2loAQFdXl/A3LbDCIQDgcrkEc3o2n6lRXm6H0WjQ1DetVFaW5LS9uUKxjguYO2MLhCLoPudH/XwXbJbsv+RzZVxyOF021FY5cbpvDLVVTvzzNzahb3hCGHuqe8HG9uC3L0s6rrNrSDQxT0TiqKtJvhfSPlywYn7StZSCwNK5jpQHv30ZjnUP4/899x5annpXGP8/P7pP6MsPvvlJ1XdAqe9y90Ouv71DAbQ89S4WVDhQU+nAmf5x2eeghc6uIcGn2zsUULwPE5F4WvdLy/NhBEIRbP/h66L7d4Hk/Ms3LsTL75wWfi8rsyv2R649dm3puym9rwy5NnP5/VZDtfXVq1cL/zOhmQnM593Q0AC/3y+0lc1nagwPT6Q8Jh0qK+e+3zATinVcwNwZW673T8+VcanxjzeuEzSYSGgSbrsJY/4ABlLcC+nY2HljU7/bjTqRpmk36hTvBd8Hvg0tpHMdBh8kd6Y/USLydN8YXt3XhdN9Y8Lv73WcS2mOlvZ9gNMI5cbC95dxdmBccCV4XFZ0nhxI+15ouQ+VlSWyx3nPDKtaMeSej5zme+LsiOz9k75j/3jjOsEtYDMo91upPbnvKSsdKqdhszbzkR9BbUGu2vKWLVsAAPX19Whra8OVV16JZ555Bm1tbfibv/kbzR1oamoSNONDhw6hubkZfr8/488IolD4KO6fVgruyUfAV7p9yPV1APUguTVLPGlvh+L7rkUg8H5saQGLTAP/0rkPVrMR2264EO+fGMSaJR4AwHd/tl/ox903b0gZB6A0TrUtYfw94seuFvSn5LqQS45TV1Mu69vm25zp77emJ/eVr3wF119/PbxeL3bt2oXbb78dd911F5577jnZ49vb2+H1etHe3o6GhgY0NzejpaUlySedzWcEUQh8VPdPy5GLe5GNIJZDKVgqneuoBcllux1Pq0Cwmo1YudCNu2/eICpVKRdwJt2HnklWNh4+M1i1x47rLlsia6lVAAAgAElEQVQqMqV3dg/DaTerXk8pm5w0yl96PjDtZ2aoBf1JFzescMm2Gy4UFloA8PjuTmxorEkav/TezfT3W9PbMzo6imuvvRYPPPAArr/+elxyySUYHVU2ETU0NKCzs1P02bZt2wCIBW42nxFEIZCutlbMzLV7kStzplKQHLsGizge8wdStJS6bbktTTzsdyZ4pAFn0n3ochHW2S4qBqaygTH+a89RDPlDqPbYces1a3D/rneT0nvyFcP4bHJyOcZ57f2mLctFwhpIFO8YmwjjSNeQbNlPqzm5nOegP4ibtiwXkuL0DgXQfc4Pt90knKf0vszkO62pdafTiQceeAC7d+/G888/j2eeeQZxLhsMQRCEFnKtIWdDrsyZSpM2P8HXVjnxjzeuU53QlaKXU237kkZAS2s/3/HFdYJ5WDpmaYS11nvA+up02ZIWFRctr8TL73jROxQQZQrrGZzAvzxxAP7xsOh6NRUOUcWw6y9fhgeffV+2Tyd7/CLtPTwZFZm4r798KZ585RgefPaQ7PhT7V/nP6uf7xItspTel5l8pzUJ7Mceewy7du3CD3/4QzidTnR3dwtZ0AiCUCZXWhw/QRYjs1GJLBd7shmp9maf7htTFYZq70mqbV98m2cGkms/lzktQrIP6ZiX15VpqoWtZIZmCxHpguXumzckJW3hzfNAIs2ndBEx6A/BbNKnZWa+8crzAQCLq104MzCO/uFpDb93KCBbsENpkSVdHPH3Qilt60xaizRdpaSkRPBhA8Dtt9+e104RRLGQCy0uXU1NrZ18me5mu0BHJn2QC5Y60jUEAGnv/ZaDn+Brq5yq5myt70mqRYaceV46Zj716PefPIDBkSDcU7Wwv//kgaRgMWlQ3acuqpVdiPD95RcZ0usxzfv2rWuFz6V9VjIzL652Cb7mynIrnn71uCjQLDwZRWW5VRDabpdFsWAHE8L8dVi/g+EIvjW1/Ysvu6mUtnWm9mVrusLu3bvxne98BzqdDm+99RY2btyI++67D5s3b853/wiiYAmGIwhPRjNKrcmTjqam1pd8lefMR4GOXI1PTYjzwVKs6AOb6JnAUuublvSabIJnWbOU+qj1PUnlM031d77PvHl5yB/Cu8f6ZYPF+HzuvUMBPPHSMaEEZ2W5TSjBqQQvBBkGgw4/fPY92ahuQDlLGa+984VYeganS1/Oc9tw2xcaYTYZUO1xiAQra18uspx/HvuO9Anbv9jedtbPxPVmZ/eHpm/Vww8/jFdeeQW33XYbAODll1/GF77wBRLYBKGAVCvZdsNa2QAYLchpaumSz+0n2badi0hbuT7UVDhUFxLS6G6e3qGAbJYxvnqVFg2LCSubRb7+s8dlFQSNu8SM276wBsvryxTfEy05tJV8qtJFzXWXLRH9XRqVxILF+K1qjGg0DpfDhP7hAFqeOqi6SOP3qLM2eLM186VvXFkFQN1Hz4+vd0gcbMY06d6hAJx2c5KGz98zuchy6TtjNOgQicZFx/CLgmwX4pmgOUq8pKQEOp0OQCLjGAWdEYQyUmFgNhmy0miZn25DY03Ooo1zRbZtq2190mrmlutDqoWEdE+uVMNm42D3ntfWMsnPLSf873v8baGdodEwfv7SUdz79Y2y58sVGEm1aODvn/R+mE0GwXxcWW7Fxxrm47V3z6B/OIiyErMQLNY7FMCXNp+PilILnnzlmHCP/OOTKcevtEe9styKoZEQorGEHPnZix3Yva8bN155vuoz48fT6fWJrlXqMGNkPJykSattIWMYDDp4XFbR3yLROL60+XwhgM7Dmdfltu/NBJqucuWVV+JrX/saTp8+jV/84hd48cUXhaQqBEEkkysBKZ2kNzTWZNROPref5KJtq3m6ElkmVaakvlk536icD1kaZMSyZVV7HEkmU16YZJKfW3q9MwPjoiAsIKElKgm/dCO85QQ8fz+qPQ7op5Qw6f8mo14QrgaDDk+8dBTVHju+eMX5QgQ3Q238SnvUeXM2gx2n9L2RjufWa9YIpnmDQYd/+NI6jAUnZTVppWQsjGg0jp7BcSyuns5uVlvlxKbG+djUOD/pfXSXmBGejGVsNcsUTVe6/fbb8cYbb2Dv3r3o6urCLbfcgksuuSTffSOIgiVXAlI6SUv3hqbbp3z52Xg/JR85m24gWDZVpthkrFTmEpA3t/JtrlzoVjSZAmJhcus1a9Dp9WHNEk9Gz5ffe2zQ6xCNxVWFn3QRyGdR87gscFpN
onsvfXcG/cGkBQPvs36ns09ksv7S5vMRiUSx67UTQht8BPc8tw23br0Q5Taj4vhrKsQVv/hgPrmocbWAM+l4xoKTaPlGkxA0WOa0YB53benx+470YePKKlHylJ+1dggWg8d3d+KOL67DdZctwcBIEFuazsPYaEDUl203XIh/evxtDPlDePDZ9xUzueULzVe55JJLSEgTRBrkQkBKJ2np3tC5RCYmWynSLT6ptFjpgkA6SfcMjsNsMiS1rbYAkDOZMo1UzjyudWxS8zAAYe/x7ddfKNIO5ZCzCFx32RL8156jGPSHcNcjbyEajQv9kbPy8O+k1CWw5+3pwkoGfUKr1uunr88ELm/JiOn1CIajqpnIlMZy503rBZ+wx2XBnV++SLj/cvdBaTyXXrBA9hr88QaDTjC7s2e1cqEbNzevECVLufex/RgeTewTf/XgGcRicVFQ2qA/KLgK2DkzFXAGpBDYp0+fxo4dOzAyMoJNmzbh2muvxSOPPIKuri68+eab2LNnz4x0kiA+qkgnaZvFmFYhi5kklcmWTzepppFJE2GYTQZhexKvQfrGQkl7bNUyZklNwkoChW/DXWLGl7eswML5JSIBncrXmur+8AFcg/4QxoKTgoVCurWMh7dkSK0A0akAKb4/WiPKpSZq5luOxaaPv2nLclHiEXZ9ZpaWW6TdeOX5Ii2ev09lTgvu/fpG1aQwwLSbQk37loONb9+RPvzsxQ7h3vCadrXHIUruwoQ1APQMyAel8QF4qeqE5xrVEd92221oaGjAVVddhRdeeAE7duzAxz72MWzatAnXX3/9TPWRyJJ87r8l8s9cyg6mhprJVi7dpNK7eOOV5yM8GcXTrx7Hg88ekhUE2264UBSwxRYEP99zVDFjVs/guCjJhpzJnm31YmbqodEwnnnteJKABpR9rVruj7T6E/Ovs7SbAFTNrbKBU1NmdV6I8O+OWkR5MByR7RsvjPk93fz1+YWCdJHG3ydpnXBp/6RJYfgYAgCoKrfhnq9uSLtgzMaVVdi9rztJ02bv1JAkjoBRXWFHNBpH33BAuKdsWxkf6zCTc6vqFbxer1Dg45JLLsGnPvUpfO9738t7p+YSucjAlK3AnAtJKQgiFdJEJFazQRCQAER7ZuU00kBI2X8tFQTvnxhM8n+yvwHJGbPkFgxy3w2RIIrFRW0qJfdwWk3Yd6RPGLPWPdKA2Px74uyISPNWM7fywt9dYsbWK5bh2ddPoH84iFg8jpM9/qSc5mrzgFIAXngyKlg4+OOl5mYm1Ncs8Yh81u4SKy5fVwuXw4Tn//ChUJDkzpvWC9nX5Nqs9tiFe8DoGw7gZI8fKxe6k+6HFLngQqmmzb9TDN6q03j+PGz79z8AQNI9lcY6zNTcqtp6aen0i+JyuUS/fxSQeyCAso9FSxtKZd/ydb7S/tQzA8Wb5pKYHaSJSACkZY7uPudX9F9Ly1RKA67u/PJFsJoNikJVmmTjzMC48DP/mZIgUjLHelxWbPtxG6LROPR6oKLUhr5hZSuC1Foi3WYmNbcqpb9kiyPmEnjm1ePCAqZ/OChK9DHoD4qSnygtmKTa+M/3HE2a+/hj2Va8yeAkF3xnEI6JxmK466dvIRpL3BtmXh/0J1wZ9359Y8pFg7vEjCHOTD0WCOMP750VLY48LqtoTlQSpLymLX2HmOuD3wPfNzwhCsJj95S1l8/cBkqozvhs37XS78WO9IEwk1s6wlPaBu9zk56vpewci1CUW0DwP/O+JmkkKZ/m8lvXXaApw1Ax8FF2DeRi7KnaUPLRajFHA0AoLM72Jf1+8Np7mdMiqxFed9mSJI1QavJlC4aRsbCwd5eZawFxKk3p91s6Ib9/YlAwCcdiCS2QjTndCVzO3CqXKpQx6A8K+4IH/SGRL5b1IdNEH1qEkdVsREWFDbc98Jqsz3rAN90X3hcOJILt9h46hwUV4upm0gXNthvW4V9+/g78E5OoKLPg4V8fERYAbpcVAz5xhL3USsL3XW7nhmjR89px0cKkfv70Fi/+nrL28pnbQAnVb253dzcuvvhiAEA8Hoff78fFF1+MeDwupCktZqRmp+5zY5qFr1wbdotBlNeWF75yPrpBfxAel1W06uYr33R2D+OZ104oajSsP7deswb/94kDGPSHhJcfSKS5ZH2Qnp+JNSHfFJJrgN9LnI5FRK6NbO9/Lsau1gY/Vjk/aCpztPRYPiscM5tKay7zW7KUfL880sk6GI4KUdU6HRCLxUXmWj4wSo01SzyCJq7XJzTufl8w42AkZm4FEnnN+YAtqTlYKjCuu2ypaI+0y2FWTfSh9n7JCSO543mrCPtfmhVNDr0OeOKlo8Lxcr76YDiCh557H/6JSXhcFly6ZgGe/8NJAIkFwIAvMTbeddHZPQyzySC7OOHfUzYOftEjXZjYLOLa2XKLHekiNN+oXqGjoyPvHZjLWM1GISH+0GgYu147LvxNp4Oq8OUn6b/5bAO+97P9mAhFRe3zwvedzj7FxcDVl56HH/+yPal/O1/oEISvVKPhBfFkJIaRqZJ2/olJ6HQAS1THZzPiz8/EmqCGVJtKVxDl2jWgJWI5U/i+8mbVdARlLhcYuTDdKbWh9lzYeanM0VK/rVxWOLUx8PuJWRupTL77jvQJmnE8DvT7pjVVOXOtEmVOi7AXeHldGX747Hspz+HJZlEmZ0LmrWm3X38hHnrufZGLgF9k8XWlpQJTrm2595HXQqs9drhLrJiMxJI7y2G3GjERnM4rLn1efCpT3kXy2sEzKe/Jj54/jGgsjspyK760eRkuWl6VZCafzoNuxWVra1BVPu3G4IU7i11audAt5C9XM7vPBLOvNs1hguEI7n/6IHxj4aS/STOz8sKXF963XrMG//rUQcRSZHL9bdsplDnN8I2F4bKbRIuBXa8elz2HCWsgsYBwl0wH6sgJYqW+M5ifqTwNawKQWvgqaVPpaPLpuBbkkFpLHmvtFPkatfRBK0oRtOkIylz6x/htSpma7pQ0rn1HxAvNQX8wyUcrZ44GpoVLudME6PQYHg0p9k/N/OhxWUXmYC3a7fK6MsGUqtMBTpsJo1PfJ7lsY2qCtcxpwaUXLBAFjfG5yJXiRaRJYqSBWHxlqnluG6o9jiR/ttTvzGt8gLIGKK0rLRfMpRbBze4P00KZ5vq9x/bDN5o8XzL0OuB/33AhfvTf7wsmc4NeJxQQ8Y2FhPmzstwqpE6VmvuVYNp2/3AQT7x0DK8eOJMcTBidPuaZ105Ar4eQwx0A3jveL+RRl1pzGLPhvwZIYKtyZiA5daAWRAXbf/6OaDWpxMj4tPD1T0wKk4ndYtD0osbjUDxOr9chFotDr4PqwoH5mYY1WhO0Cl8136ZWTd7jsgorY2l/5IS3dIKU+qv4PvCuhVxYE0QZrDgNO536ubnyj/HblDxTJRQzWZioaVypsnTJ+Q4BiLJGAUBlmRXbbrhQtl9KbbDxDflDsoFDSvfkoefeRzQWR4nNCKvFiH5fUPh+SMeh1dohfWZOqwnfeWQfBkeCsmVRpUlipJo982kzYaiWqEVL0ho1zg6
MK5p1U1UTY8Jsf0evqrAGEvf32OkRUVGNaCyOQX8QVrNB9D70DwdR5jQnzovFhPdMiqfUjHhcJzv/sTkGmDbVs++k0KcYBAsk71ph58sJ49nwXwMksFWRRm1K4U3LSvBacDqwF1NqRldDqSuxKS0iFgd0KscpthuHJjO6kuBzWk0oLzELSQmYJu92WRCejGnSnHsGx0UmTLYIUYoLYBMkML2A6BkcFwlrBu9ayER755EKSJbBip9wlba18DABxSYbRiAU0Sz02dh5oXD/rndFiUbSFdpyGlc0FofLYVYUttJzGdKsUf2+IAb9QcX7Im1DquEPjYbhtJtSjom/J6OBCEYDiQV1LA7ROORMs2raFL+okBb2kCuLWlMhTtohp9mzMStpuHJj6hlUzjPOxlTtcQim4ERWs2N4+Z3TuGnLclR7HLLpXN0lZnzj8w1w2pJT4wbDETyxp1P1vgPTGdR4XA6z4FeWCl1m3fSNyc+jJTYjDHoD+oYDcNlNSfOt02bEw79ux8h4Imht2w1r4S6x4l+fOiDMRwa9DmuWeJJcK0BiuyBvJs80iUuuIIGtgtVsxE1bliclqWfMtYJlagsI9nmmXdYy1kdf6BDMirzg47d0ANM/D/lDeHx3ByrLEoE6SsK3styKiMQvFlNY0AxxEyS/gJBrgyF1LWiNTZBDKiBZBit+wmXa1J1fvihle1Lrw/ZH9+F031hGrgC+2hCbxJnJNt1Jp6bCIdor7R8PqwpbpTb4BXFNpUOzpsL7YJU0fCUzNn9PWIzHkGQcAFQjrJXa5gUsb52rKrcl9e1kjz8p9aeSFUaq0UmPk/59eV1ZkhtE6pqKT80GTDlgdZ95qxCfMGZoNIyHf/2BkJzlpi3LYbSYcKRrCN3nRuGfkLck6nTA1/98BUbHJ4W85Dz+8TBanjqIW69Zo9n0zeAXXP6JSThtRoxN/a7TQfgZSEStjwXCODswLspo9tefXYkypwVW83T1MiBh8fnWdWvR2T2MnsEJvHrwjBDoxvz+M53QSBcvwjqZ/f2jOWvLNxbCvY+9jeHR9E3jxOxSYp/2S6aD0sKHTSZq5n8l86lvLIR7Ht0nWhzwE6qctnvi7Ajue/wd4febr1ohJH7Q2h/WJ6lZVS4oKV1rglxq0Eyiz5nWkk7p0PeOD4gior+0eRk2NVZrDqriBW4wHBUJ5+svXyr4MBl8hDUA1bZZ+7x/+t/+/s+EQhL8c+C57QuNePrV44rt+sZCouA26XFKz5lZc450DSkqH0psu2EtHv3dEUXXoEEPRGXWwaVOM0am4nH+z40XodRpTjI3S3E5TPCPT6LcaUJoMpa0GC8vMUOnS5i+2SLNXWJOLLL90wK4zGnGlg31Ircew24xyLZ7/RXLMBmJ4ulXjmEsEEVZiRnf++tLcPfDb4iEO8+dN12U8YJXjcrKEsW/kYatQjAcwb888Q6GR0OazN9acFoN0Bv0Qi3ZXLX7UUKrWT8TYQ0oPw+lLXVSYSfO9mUUAml4YS21JshFrUu1pjVLPFhQ4cDZqShrrf3hTcm8H5/f4peJNUGaCzqTCYvfxgRAk7k/GI7g8d3iHSwLJFuVxibCqkFV/D2xmo249+sbhe07Dz57SNS2p9QqmImB5ICtvYd6RIsF1qboPbAYccePkrO4Mao9dgA6Ubud3cNw2s3CIoHtyebdS2xsi6tdIosJb81h/uHHd0+brCvLrdBBh77hAMpKzCLfM3M3sWIffJEOqf9XTlgDwMjY9K6UTq8PpQ6zSFjzY2CwOXF4bBJfaT4fj7ceFb7n5SWWqQVhIlmK02oS3DtSfGNhxAHZmB05F+PwaDhpF45vNIx/+I+9ivFHbklOi5nYKgqQwFals3tYMI/kQqjqAIwFowCmX5p4HCixGTAa0O6rZjisRoyrBLRlshgohAXEXOiekt9bGoh36zVr8C9PHIB/XDw58ROHWtT6thsuRM/guHDsZz9xHv7zv8UCRa0/UuHL7zv1T0yKJjWlnQ5yRRiUEl1oRS6H910/+D3ODoxjntuGO764TnHBcLLHL5rsy0vMWFztEmn8bpd20zwbh9lkSPZhuiy4fetaUQDXdZctER3DRyNLg+HYOX/9+UaREGXWkcpyK25uXoHF1S50dg+L2t3xmw8wEYpOaf3LhL5JBd1YIJwUcMZM+G6XRfAP82P74hXL8PTU7hO+9rW7xAy9QS+YfgHxwszjsqLrXGJho6R5VpRZMOwPC6b2n73YAT2Xc4stFtT4rz3HEEfCR33jluVYVlsmvA+Cy0FGWAMJn/QzEu16ZX0ZjnT7RJ+lCsJVEtZ6feK70vL0QdF3ZtYznX3UGVB4ITJF6d2QCmuTQYfJaGqxpCasgcwEbzwOOG0G6HU6RZ8Uoez3lgbi8VqsHDaLQZR6Uaotu0vMMBj06OcyOsktqnjhK90W+N2d+zEyHhYWEC6HWVhAKE1YUu2dmWoBcWKSTLRrObfByR6/YDnoHQokLRjU2r9py3IAEAV5DXFCkeW0Zikty5wWWR+01KfunjInD/qDogAuPjEHg49G5j9j///o2WlTtF6X0GITP+uELVhPvnJMdD5b1PUOBeDtFbv5+Hfg6VdOCC67nsEJdJ3zC1nbhvwJ687ffLZBeH8MBh3CkZgo7SYz+fN75nnLBJ+k5pnXTiQJ67/8xGIAgNlogLvUkqSx8u9ZQ305fv9eD9RgMSqjgQicNlOSK2dsIowyp0kUjFZiM2LzhjohuQrPkW6fYB1gi4B55Xbcs3O/aj8Y65Z5cODY4FTfEp8N+UOCNYLfmpZPSGCrcNHyKjzx0rHUB+YYLcI6n4wpaPu8KbrUYUI0FhcFdWghkyh1ORwWPSZCMaGtVKvlfKK2MOKFtdmoQzgiPjggY6J75HdHhPvKC3OmscQVov3Z+PltgTrd9JaVnsEJtDx9EP7xsPB3LdYdXnsHps21P3/pmMh/DainyWV0dvtEwkwq6ACoai7S/ckL57uw91BPUjEQFtTntJqErGYGgw7/9PWLFX33fJDp0JQ5WbqPfXG1C3d8cZ1oC1IibWa7sD2zoswiLBgSwX5cms74dJYutl97bCIsWPPkePXgacGMLA3MGh4Va+yP7e4UmbiH/CH8y88PCL9Ho3FRbgdm+maWDn4x8lhrJ+756rQvnTe3M3Q64I/vn8XA1Bj16spzSmHN47KbRDtJBv0h/OPDb8rOIX/7+dWo9jjw+ns9ssGu0WgcVrMBo4EInn39BC5bW6O5H7WVTpzqHUsKiGMLi2gsjp7B8bSCLjPBcM8999yT1yvMAhMT6nsBtWI1G6FHHB0SUwoBhCZjCKfIaKQFvS5Z8FjNetE+TTnYosZu1eOqi+vx8TXV+PDsCILh7PuUL6Ix+fFKycV9VVpEsAVCwpJiFC3OlEoFhCbF/XG7zDjQOSBsuRkLTGJpjQs//mU7XnizG2939uH1d8/ixTe7ceBoPzY1zofRkAiHZv58vn8d3cP41EV16OgexujEJCrLrXDaTBgPJoTHZ5oWCecDgNGgx/rlVaj2OLB5fR1anj6IfUf6hL+77Cbc8c
V1qChNaNYHjvbj4NEBYdxGgw7vHh8U+r6/ow979nnxdmcfLruwFu8eH0AglEj28qn1dWh56iD6hgPwuCz4hxsvgtNmxpmBcezZ5xWuGYvFMB6cvpcTwSgCoSjsFgP+x9WN+PCsX4ipkI5v8/o6PPTc+wiGlRdOwXAMwXAUbpcF266/EIdPDmJ8alFXWW6FQa/HeDACi0mfci+0TideKF65vg7nhibgsJow6A9i0fwSvN3RDyBhxVta48J4MAK7xQiX3YwDR/sxFhBbjXgXj9yrV2IzJC1WeSrLrfjMJQvRfkrsFghNxnD8zDCMBh3Ck8rfixK7CUtqXPjJrz8QFkm3feECvH98QPT+snllIhhJupbNYhDNOy6HSTi30zuCQCgKh9UAu8WI0GQMpU4TQtx8s6lxPirLsi+o5HAoC32KElchGI7g9v/Ym9ZeaGL2YCtqm1mP67ecj5/9tiMn/niX3UjuAQ45k7zadhw+wpovcSg9/4d//2c4dLQX4ckonnzlGPqHg7Lmd2B6b7BUi2KwbUeLq10IhqOiilplTjOGpqKKpZYKXiu+86b16BkcF0VW33nTRViyoFTkL08V96HX6/Cfd1yOY6cSiwS+qIfbZcGXr1wuinhPBcu13tk9jIGRICpKbWmdD6hbpKQVslj0tstuwre2rsV4cBLhyWhSJH266HTA334usa+bvyeZtCN3/1PF+Ej5xudX4Zd/PCVYXr7y56vwfx9/W/ZYh9UAo0EvWFQqy6347le1pbNNBUWJZ8iZgXES1gUEm7gD4Rh2/iZ3efBJWIuRmxyH/NM7KXQ6oLIskZRDWvjj1mvWJEUas/M/ODUo7DtnMD8qa4MJYnaMnLBm5/HFPFi+b5vFIPKvSociJDHxh/Cn987i9fen81czPyWfHEcuWYeUWCyOdzp6UVFiEaK4+WI+P/3NYdXzpfQNT+DswAReetuLvuEAKsosaQWLpnIfDY2G4bQZBOsLi972T0wKPl93iRlXf/I8/PKPpzAwEoR9ykWVDvF4ouIWCyLjhfXqxeU4fHJY5Wzgs5vqYTObZPd2A6ljfHhcdhOW1ZZj2w3lwta5B59TXgSNS4KHb25eMfvFPz7qSDMREUQu4ZM8FANCcp44cM0nFyMQiqHUYRa0v57BCZwbGsenL16I37adShKWDz59MMmn73FZMBaYFG13Gh4NKqaplMKn/Lz0ggU40jWkeTzP/1EcvBSNxfFW+znM9ziEBYN/YlKTsHr2lWMYnAqAu+OL60TbmsbTFHSP7z4qEs58GUstaIn1UIpjYQyNhvHT33YIsd7pCGu2R5ulb/3De2exvK4MFWVWwbd/4sxIynbe/KAfn920UPN1ldDpEs/x+08eQCweTyt3OTC9S2EmIIGdAj4TEUEwmJmQN6mmG1C3cWUF3mjvF4RUIWyp08pTLx+HbywRySsEwGG6mpIcUmGt1ycE7tOS6OmnXj4masNi0if52XkStZd7cNHyKoQno1kFKO567UTShqRUwspq0gsBcb1DARw7PSxEcWfCXHpHMupKPI6tly3FykXlQjCglICGWJS+4QAe+W32ljR2P3kNfx7BTIsAABTkSURBVMgfQqnDJKrxoEQkGkcwHJ0RDZt82CpIM00RRL5x2vQYC+QncM5kALTKCR0SWaxyEP+WU5Tuj5bFzmzuJOCxmvVzOjhyppDLOlaouF0W/NNfXZx3H3bB6I8tLS1oa2vDjh07ZuyaNRUOVJTmN0yfIHjyJawB7cIaSGhOMyGsS2zpTXCTCh4ELWrHTAlra3JtDBEkrBMUi7AGEhq53PbEXFMQAru1tRVlZWVoamqCz+dDa2vrjFx3ZCws7C0kCCL3jKbpw1czfc8VgpllxCWIlBSED7utrQ1NTU0AgMbGRrS1taG5uVnx+PJyO4xGQ9bX/c0bp7JugyAIgih+li3yoLLCmddrFITA9nq9qKurAwC4XC54vV7V44eHJ1T/rpX55dlvgicIgiCKn7Z3z+DSCxZk3U7B+7Dr6uoEIe33+wXhnW8az/PAaS2IW0QQBEHMIsvryvJ+jYKQRk1NTYLAPnTokKo5PJdYzUZ8bFX2KyaCIAiiuBkazW2xKDkKQmA3NzfD5/Ohra0NAAR/9kxwxfraGbsWQRAEUZhI86vnA9qHnYJgOIIX3urCb/d25axNgiAIorjI1V7sgvdhzxbBcATbH30rpbBOVU4uHeyW7BtzWBMR8k6bHjZzdtHyl6+rhsWUwwESBEEUIUP+EM5M1XTPFwURJT5bnOzxa8rTq5SQwWbWIRBOz4AxEcre4MHK/OUiCUcsCoQmi84IQxAEkVOqPXahmly+IA1bBbX6q1q46uJFuelICrJUolVJp9g8kT8ctFuBIPKKUj14pc9FxwC49Zo1ec8nTrOACmZT+reHPdt5bhvq5in7InKJSt37WcGZZrpJJUpsRnzxU0sFE3+mlDpS5IosAAKhGNwuc1ZtWDJ4nwmi0MjUgacUzVXuNKPMqf7diwPo9PoyvLJ26BuswuJqF9yu9HKJ3/qFRtx500W4++YNWF5fhspyq+ZzS2xG/NVfrBCuOc9tg8uRnfCzmWfmETusiX7qdMi4ZCS/knVYjRgNRPDky8cFE386VJYlkt6UOc3Y1Dg/o/7wsQmzLfRjcWA8y1KcRoNOk7ZAZMZ51TOzQM8HuYzDmW1y7cAbGg3DNxZOedyi+fl//iSwVbCajbjrpvUoL1EX2q6pybzaY8fy+nIsWVAKq9kIq9mIm5tXqJ5rt0xrj6OBCOa5Hfinv7pYEPpfvWpVWn12OYxCf6s9dtz315fg6ksXaT6f/96WOc34SvP5yteyTwsxViw+0z0HdotBdG46xefl6PclSuX5xsJ44U31zHhyOKwGUWyCljJ7cuRyIsw2j/Z4MDojpRlne/K3mmenA0P+QMEuiPh3faYW+cXGQ8+9j2A4v/Xt6cmkoMxpwff/58dhmJqF9HqItOBqjx33fHUj7rzpInznK+uTfBiLq12Y555OcWo16QQBDwB2m1HUVk2FA1azURD6y+vLVLV8vQ64+tLFwu/+8Qj+6i9WCv0pc1rwqfX18JRqM6e6nNN9842FUVVuR9VUilbL/2/v3oLbOK87gP9xvxBYgoQohdGAHlmWOCZEVY5IZwwm9jRSLNJNXY3sBFQaN1RTupNpLT5UHD2EpJWqbsemJlOp7mRsKoXiuo7gKJy004pQMnGT2ISb0BdJFBgrscpWK5eWJZLA8gaCALYP1C4XIAAuCF6wq/N7EQkRH76DvZzvtrv61IOZm5aXxLI9vchi1MB8p7xie3KPXqddkV51MTzOcS0ISVqrBezW9R2NiOa50HOlhKfiRfWs6uWaiSWXvFplNRtlJr0yWz1rsUqcErYM3FQMiTtn3mQSKb1gISkKCTad2ajHsy31aHuyFuWMCdE5Hnrdwtd+OzyLb/zB/VkT/kIvfyHhSnfnJA9s3VyKSqcVwHzS31LJpNTHbNTja1+sTinXbl34HOmcc2RyDs5Ss1gWMP+geACYjct7sLzFqBXnfMoZE/78j3Zm/LuZG
C/rUYNNn3WJiX2tRKbmlt2rXgnFcsqy3lk/sNQJ2m414Cu/vxVffuS+FfnehMZSqU356w+UaKmrVZbTEK2rrpD1d08tMSopF2M15BwhXGkVDvOqrxKny7pkqPoUg0qnFSOj04t6wXKYjXrYrEaMcfOXiI1xs3CWmjEaiYoJNtfqQofNhD/7Ug26f3ARwPwcTfr7O79eh49uT4l1S2c0pC7ceuyz98D/n9cAzM85S8trP/gARrmouPM5GRNGueyXt5WYdZiJJZFM8tBqga6WB1FqM4r1AYByuxFjE0vPAwGAzayDXq9DeDKGSqcVf9iwBTvudYrx5+vA57dgk9OCsz/7EOMy6yCXw2YANx1HUnIGK7XqEJle3ogBU2IENxWD02FCIsHnXV+rSbdotMJRokdkKr6suT3NnaaDw27EXJzHRJZRlcjUHH76zg2MT8xCp9MgkeDBWA3Y//C9eCVwNe/PFRrIWo0GG0pNuB2ZhdWsRXQ2uexRC6tJh0ce2Iy+/7q+6P+0mtQkpNMACRX0ltdCqc2AyGTm/aKcMWGMm8WmcgueeGQr3v/wNhJLfLFldpN4vt1UbkGS53FrPIoNDhPGufnOk0az9PQbNz2HnwzcwLFD9Rj44BPs3OrEC6+9j0SSh1YDPPx7lfj5xfyvghE+O/1Ye7S+atVXidOdzmSoqLCD/Wg8Z0JcSjQWx/HvvyMmfWlSlFPeSrz/22cGcHNsBpvKLTj61c/gO69fwo1PJpcsLzw5i7955R2xwQHM9wClO07bk7WITM1h51YnHLbFQ/iXPryNk+cui7/bzHpMSuapSyw6TM0s7PztB3fBaNCJ9ZHWX45SmxGRyRg2lVvwbEu9WEb/4Aj+5ae/k1WG1OOee/Dm4McYn5iFVjs/0mK36PHFehd6fzks/l2mhAkARr0WsXjm0QStVoNkkofdosOE5Dv4XO2n8Nbgx3nXVWCz6ABek/I9r5USsx5T0XheDbV82cw6TM2u3Ly83arHxHQcdose9fdX4I33lHNJo7AP6bQaJJI87BY9dm3bgDcv595/SkxaQKNZ1sJOwTf31+DHb/6PmGAB4ObYTMbzSnhyFpevjcJi0uG7Pw4tqn+l04rOr9cBQEqDX/g5Gkvg8rVRVLsc+M4PL+LW+NL37/7Wn+wWO1fC5+/c6gQA/NU/9ue1/6Sf94TvW6fToPubnoznvnzlutMZJWwZKirsK1JmNBYvOOmv5PttjAWXPvhYVnmZkl1piRGRqZh4kOUqIxqL41nfr8UDrKLMjC/s2iz28oHUUYNM5S2VcEssekzNxFHptOL5v/w8rg7fXhSbtOFTUWZGIsGLPYADD2+B/41rGONmUWYzABotxidmUem04muPbl92D1+rAY7+8Wfw/GvvIVnAujGHzYjwZAxmgwbRAm9mIySn9J+VQgPgG1+6H6f//TfLLuPxhir8W/9Cb1v4fjMx6TWYjRffqZKx6hBPaDA9G4eTMeFI8wOYjM6Jya39u8GcPdrHG6pQZjfj+4Hf5vwcrQbY4LDgk/EZlNkM4KERvyuhAyAdlVvqPCVtgJczJrRL6i333Pab/x3LekwKPfulzk3BKyN570PS89ThJ3biKhvO2lFZDkrYBVqphF1s8o2r0F5++gHWfnAXXv3Jb/MqT1qHXK151+ayrLFJGy5A6skl2/8BQOfpX+WcGkhXYtZh34NV+NzOT8NhM+Hm2DT+7tV3ZS/WS9f2ZC1cn3ZAm0zi+dfew82xGThsBvB8fqvYN5ZZcHDPtpQRD+EkZLfoceCRe/Gvbw0jnGWYs1iU2Y1I8kBExiU3mezZvRmXro3idjias9GSafjVYTPi4N5tOPeLa2Ij1GbVY7LAhk+Z3VjQtI20Nwkga49WLptZh0fv7MNmo048HoZHuJRjOf1z5ViJDoicc0GusrMlfel2qCgzQ6vR5F32clHCLhAl7AWFHGTpCT996EtuebkSrmA1tll4chbH/unXshNu+8FduP+e8pTXorE4rl4fxysXruZ1Yq4oM+Pbhx4UGyLp38HwCIfYXAJT0Tl87z8+WJRgSm0GfHXvdtgsBmypZAAgZ+NLqOf1m5P4+cX/w/jE/KhDEhpEJmMoLTEgkeSXfc29xaTDjGTqwGEzYGJ6Dok8RyBsFj10Ws2iBkv6nHQupSWGJRs8wtoCKeF7GxmdXxlcbjej43u/QiIhb44VmJ9qeevKx2JvUCgvNpfAaz/7ndgYyDVPLNBqgBN/0ZCxpyc99hirHlyOhsXjDVXYUlkKo0GXdX1NpmN5tedvM5FzLljq/UJPXxjeFrbDTJxHODItHi+FNC7yQQm7QJSwV06hrWq5Vis26Xy+w2YAJEODTIkeOq1OHEbPdRKLxuIYHuEwORMTh+EdJXokeA0mpudQbjfCu2cbjHptyolTTlzhyVm8e/UWmBLDoven10HOtsh2Urx6fRwnzw3m8e3NK7Ob0PbkTpz60WWMcbMoZ0x46tHqlB5/PuxWA7xf2IreXw7PL+hkTHjmiZ34h95BjEYWz3GaDNqc17TbLDo079kmlicMfZ7wX1xUXrYebbXLgb8/dynnmgth3lPac02fvhke4QAAlc6ShVEVuxHhLI29XD1dYTs6GbNYVroNDhP++k/lPXEqGotjOs7DqtesS7JeKdLvRdpoXa/zPiXsAlHCVp7VjC1T7xbAslvi+fQSimmbSXsncnuUdut8TzrT1Qh/++p7uPHJJDaVWzAXT6YsckyX/nnOUjO+9dTuRaMEwyMczgQ+EHuqwtzmUtoP7sKWSmbRdMnwCIdXLlwVh0flNMpicwmc+8V/46NbU2CshpQRmnyGkqWJpfsH72NkdDqlkZdPT1daN0GuHnU2xbQ/rjRK2GuEErY8ao0LUG9sxRaXkERsZoPYA5WbvNOTlXQRZDSWwHP//C5GI1FxVX46YSV6tvKkdZT2VIVkl77iVyrTdEZ6zPk0yoTYpMm2kKHkQoeCV0qx7Y8riRL2GqGELY9a4wLUG1sxx5UpeUuvo01fvJOerNJjk/YohXnds298KL5fOkydb+8yUz15fv4mQdJLAVeKNLa1mhZaC8W8PxaKEvYaoYQtj1rjAtQbm1LiyrXaPluykhNberJbyUsdc9WtUErZbvlSa1xAcSZsZTfvCCFFKf1OgNl+LrTcfO44KKe8QsoiZLXRvcQJIYQQBaCETQghhCiAKuewCSGEELWhHjYhhBCiAJSwCSGEEAWghE0IIYQoACVsQgghRAEoYRNCCCEKQAmbEEIIUQBK2IQQQogC6I4dO3ZsvStRrLq7u6HRaBAIBLB79+71ro5soVAIR48exf79+8XXMsUi97Vic/jwYXR1dSESiaChoQGAeuLr7u7G6dOncevWrbzjKPbYWJZFZ2cnmpqaAKgnrp6eHjz33HPw+/1oamqCyWRSTWx+vx83btzAxo0bVROX3+9HV1cXAoEAXnzxRWzcuBH33XefImKjHnYWgUAADocDHo8H4XAYgUBgvaskm9vtxpUrV8TfM8Ui97ViEwwGcerUKQwMDOD1118Hy7KqiY9lWbS3t8Pn
86Gvrw+AuradtE5qiiscDqO3txe9vb1gGEY1sXV1dWHHjh1obGxUVVwejwe9vb3w+XzYt28fPB6PYmKjhJ1FMBiEy+UCANTW1iIYDK5zjfJTWrrwEINMsch9rdh4PB7x5x07dsDlcqkmPqFuoVAIXq8XgHq2XSAQQGNjo/i7WuJiWRZDQ0Oorq4WT95qiC0YDIJlWbAsK9ZLDXEBC8cZAExMTIBhGMXERk/ryoJlWXHDMAwDlmXXuUbLly0Wua8VI47jUFNTA0Bd8bEsi5deeglDQ0Pwer2qiC0UCsHtdqc0ItUQFzBfN5/Ph1AohJaWFng8HlXE1t/fj5qaGng8HrS1tYHjOFXEJSXsl4By9kfqYWfhcrnEjcFxXEqrTGkyxSL3tWLl9/vR3t4OQF3xuVwunDp1CjU1NQiFQqqI7cSJE+jp6UFHRwfefvtt+P1+VcQl5Xa70dTUJJ74lR7bxMQEamtrwTAMvF6v2LNUelxS58+fF9dTKCU26mFnIbSUAWBwcDBlOE9pMsUitJiXeq0YBYNBcciY4zjVxQdAPDmoITafzwdgflt1dHTA6/UiEAgoPq5M3G63KraZ2+1OqVN6slJqXFIsy4JhGADKOUdSDzuLxsZGhMNhcY5COnda7EKhEFiWRSgUApA5FrmvFZtAIICuri60tbXhwIEDCAaDqolPiC0QCKChoQEMw6gmtnRqiaunpwddXV0pjUg1xCbEEggEMDg4iNbWVlXEJWBZFrW1teLvSomNHq9JCCGEKAD1sAkhhBAFoIRNCCGEKAAlbEIIIUQBKGETQgghCkAJmxBCCFEAStiE3AWqq6uX9b5iuw80IXczStiEkIw4joPf71/vahBC7qA7nRFyFwkGgwgEAgiHwxgaGsK+ffvQ3t4OjuPQ0tICjuPAMAzOnDmDjo4OXLlyBT09PWhtbcXhw4cxNDQk3q5SuGVlpvIAiDcUYRgGx48fh9vtRnd3Ny5cuACGYfD0008X7V2wCClKPCFE9bZv387zPM/39/fzdXV14uvCzy+//DL/wgsv8DzP82fPnuX7+vr4SCTCt7S08DzP89evX+c7Ozt5nuf5SCTC79mzJ2d5fX19/DPPPCP+TUtLi/ivUEZdXR0fiURWLWZC1IZ62ITcZR566CHxZ+HhBcJTmQCgubkZLpcLHMel/F1raysCgQCCwSAikUjO8oLBIB577DGxbI/Hg+7ubrAsi0OHDol/z7Ks+MQkQkhuNIdNyF3G4XAses3tdqO3txdVVVVoa2tbNHcdDAbFhH7kyJGUR2VmKi8cDqf8LjwwobW1FT6fDz6fDwMDA5SsCckDJWxCCLq7u9HX1wev14sjR46gv78/5f/7+/vR1NSExsZGsCyb0sPOpKGhAefPnwcw/zCatrY2NDQ0iKvOOY5DfX396gRDiErRkDghBM3NzTh06BB6enrERWIMw4BlWfT09KC5uVlcRFZTUwOXyyU+1zoTr9eLUCiEvXv3AgBOnjwJt9uN/v5+8bXjx4+vWXyEqAE9rYsQQghRABoSJ4QQQhSAEjYhhBCiAJSwCSGEEAWghE0IIYQoACVsQgghRAEoYRNCCCEKQAmbEEIIUQBK2IQQQogC/D/LuG44QEFD9QAAAABJRU5ErkJggg==
plt.show()
plt.style.use('presentation')
plt.figure(figsize=(7,5))
a = plt.subplot(211)
plt.plot(df.Quality.values, "*", label="True")
plt.plot(model.values, ".", label="Model", color="C1")
plt.legend(fontsize=16)
plt.ylabel("Quality")
plt.title("Multivariate Linear Regression")
plt.subplot(212, sharex=a)
plt.plot(res/df.Quality.values, ".")
#plt.legend()
plt.xlabel("Instance")
plt.ylabel("Residual\nFraction")
plt.tight_layout()
plt.savefig("linear_regression_test.pdf")
# LINEAR REGRESSION with modified column parameters
df = pd.read_csv("random_forest/float_quality.csv")
df.columns = df.columns.str.strip()
#df.Resolution = df.Resolution.str.replace("k","").astype(float) * 1000
#df.Band = df.Band.str.strip()
#df = df[df.Band =="K"]
print(df.head())
len(df)
data_table = df[["Temp", "logg", "[Fe/H]", "Resolution", "Band", "vsini"]]
expected = df["Quality"].astype(float)
data_table = data_table.astype(float)  # np.float was removed in recent NumPy releases
# Limit broadening: keep only the highest resolution and slowest rotation,
# and filter the target values with the same mask so rows stay aligned
mask = (data_table["Resolution"] == 100000) & (data_table["vsini"] == 1)
modified_data_table = data_table[mask]
mod_expected = expected[mask]
[intercept_, coef_, model, res] = sk_linearReg(modified_data_table, mod_expected)
print(modified_data_table.columns)
print(coef_)
plt.style.use('presentation')
plt.figure(figsize=(7,5))
a = plt.subplot(211)
plt.plot(mod_expected.values, "*", label="True")
plt.plot(model.values, ".", label="Model", color="C1")
plt.legend(fontsize=16)
plt.ylabel("Quality")
plt.title("Multivariate Linear Regression")
plt.subplot(212, sharex=a)
plt.plot(res/mod_expected, ".")
#plt.legend()
plt.xlabel("Instance")
plt.ylabel("Residual\nFraction")
plt.tight_layout()
#plt.savefig("linear_regression_test.pdf")
# Broadening to power 1.5 (the Resolution == 100000 cut is intentionally not applied here)
modified_data_table = data_table.copy()
modified_data_table["Resolution"] = modified_data_table["Resolution"] ** 1.5
mask = modified_data_table["vsini"] == 1
modified_data_table = modified_data_table[mask]
mod_expected = expected[mask]
[intercept_, coef_, model, res] = sk_linearReg(modified_data_table, mod_expected)
print(modified_data_table.columns)
print(coef_)
plt.style.use('presentation')
plt.figure(figsize=(7,5))
a = plt.subplot(211)
plt.plot(mod_expected.values, "*", label="True")
plt.plot(model.values, ".", label="Model", color="C1")
plt.legend(fontsize=16)
plt.ylabel("Quality")
plt.title("Multivariate Linear Regression")
plt.subplot(212, sharex=a)
plt.plot(res/mod_expected, ".")
#plt.legend()
plt.xlabel("Instance")
plt.ylabel("Residual\nFraction")
plt.tight_layout()
#plt.savefig("linear_regression_test.pdf")
```
```
from __future__ import print_function
from __future__ import division
FASTPART=False
if FASTPART:
num_frames = 4
else:
num_frames = 16
is_alchemy_used = True
from datetime import datetime
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from skimage import io, transform
import torch
from torch.utils import data
from torch.utils.data import DataLoader, SubsetRandomSampler,Dataset
from random import randint
from tqdm import tqdm
from PIL import Image
from random import shuffle
if is_alchemy_used:
from catalyst.dl import SupervisedAlchemyRunner as SupervisedRunner
else:
from catalyst.dl import SupervisedRunner
import random
from scipy import ndimage
import torch.nn as nn
import torch.nn.functional as F
from network.models import model_selection
import math
import cv2
from albumentations import Compose, RandomCrop, Normalize, HorizontalFlip, Resize, RandomResizedCrop, CenterCrop,PadIfNeeded
from albumentations.pytorch import ToTensor
from alchemy import Logger
token = "d1dd16f08d518293bcbeddd313b49aa4"
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
def seed_everything(seed=12345):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# seed_everything()
from typing import Callable, List, Tuple
import os
import torch
import catalyst
from catalyst.dl import utils
print(f"torch: {torch.__version__}, catalyst: {catalyst.__version__}")
# os.environ["CUDA_VISIBLE_DEVICES"] = "0" # "" - CPU, "0" - 1 GPU, "0,1" - MultiGPU
SEED = 42
utils.set_global_seed(SEED)
utils.prepare_cudnn(deterministic=True)
BASE_DIR = f'/home/{os.environ["USER"]}/projects/dfdc'
DATA_DIR = os.path.join(BASE_DIR, 'data/dfdc-videos')
HDF5_DIR = f'/home/{os.environ["USER"]}/projects/dfdc/data/dfdc-crops/hdf5'
IMG_DIR = f'/home/{os.environ["USER"]}/projects/dfdc/data/dfdc-crops/webp'
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = "resnet"
# Number of classes in the dataset
num_classes = 2
# Batch size for training (change depending on how much memory you have)
batch_size = 24  # values tried: 24, 32
# Number of epochs to train for
num_epochs = 10
# Flag for feature extracting. When False, we finetune the whole model,
# when True we only update the reshaped layer params
feature_extract = False
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
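# Illustration (added for clarity, not part of the original flow): after calling
# set_parameter_requires_grad(model, True) and replacing the final layer, only the new
# head reports requires_grad=True, e.g.
#   [n for n, p in model.named_parameters() if p.requires_grad]  ->  ['fc.weight', 'fc.bias']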
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 224
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
# Handle the auxilary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
return model_ft, input_size
def my_initialize_model(file_checkpoint, model_name, feature_extract, emb_len):
model, input_size = initialize_model(model_name, 2, feature_extract, use_pretrained=True)
# model = model.to(device)
if file_checkpoint != None:
print(f'Loading checkpoint {file_checkpoint}')
checkpoint = torch.load(file_checkpoint)#, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
_ = model.eval()
if file_checkpoint != None:
del checkpoint
# emb_len = 128
if emb_len > 2:
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, emb_len)
return model, input_size
# model, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
def create_filter(kernel_size = 7, sigma = 3, channels = 3):
# Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2)
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.
# Calculate the 2-dimensional gaussian kernel which is
# the product of two gaussian distributions for two different
# variables (in this case called x and y)
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(channels, 1, 1, 1)
gaussian_filter = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=kernel_size, groups=channels, bias=False
, padding=(int(kernel_size/2),int(kernel_size/2))
)
gaussian_filter.weight.data = gaussian_kernel
gaussian_filter.weight.requires_grad = False
return gaussian_filter
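# Quick sanity check (illustrative; cheap enough to run when the cell is executed).
# The fixed Gaussian filter is a low-pass blur, so `x - filter(x)` is the high-frequency
# residual that the networks below feed to their backbones; the padding keeps the spatial size.
_blur = create_filter(kernel_size=7, sigma=3, channels=3)
_x = torch.rand(2, 3, 32, 32)
assert (_x - _blur(_x)).shape == _x.shape
del _blur, _x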
def k_to_ij(num_frames, k):
num_in_row = int(np.sqrt(num_frames))
q = 0
for i in range(num_in_row):
for j in range(num_in_row):
if q == k:
return (i, j)
q += 1
def ij_to_k(num_frames, i_in, j_in):
num_in_row = int(np.sqrt(num_frames))
q = 0
for i in range(num_in_row):
for j in range(num_in_row):
if i == i_in and j == j_in:
return q
q += 1
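# Illustration (added for clarity): with num_frames = 4 the frames are laid out on a
# 2x2 grid, so a flat index k maps to (row, col) and back.
assert k_to_ij(4, 3) == (1, 1)
assert ij_to_k(4, 1, 0) == 2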
class NetLstm(nn.Module):
def __init__(self, checkpoint_file, model_name, emb_len, hidden_dim):
super(NetLstm, self).__init__()
self.backbone, self.input_size = my_initialize_model(checkpoint_file, model_name, False, emb_len)
self.lstm = nn.LSTM(emb_len, hidden_dim)
self.hidden2tag = nn.Linear(hidden_dim, 2)
# self.out2tag = nn.Linear(self.input_size, 2)
self.filter = create_filter(kernel_size = 7, sigma = 3, channels = 3)
def forward(self, sentences):
self.lstm.flatten_parameters()
tag_scores_list = torch.zeros((sentences.shape[0], 2), dtype=torch.float32 ).cuda()
# print(tag_scores_list.shape)
for i, sentence in enumerate(sentences):
sentence = sentence.permute(0, 3, 1, 2)
embeds = self.backbone(sentence - self.filter(sentence))
# print(embeds.shape)
lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
# print(lstm_out.shape)
tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
# print(tag_space.shape)
tag_scores_list[i] = tag_space[-1,:]
return tag_scores_list
class NetRes(nn.Module):
def __init__(self, checkpoint_file, model_name):
super(NetRes, self).__init__()
self.backbone, self.input_size = my_initialize_model(checkpoint_file, model_name, False, 2)
self.filter = create_filter(kernel_size = 7, sigma = 3, channels = 3)
self.norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
def forward(self, sentences):
tag_scores_list = torch.zeros((sentences.shape[0], 2), dtype=torch.float32 ).cuda()
# print(tag_scores_list.shape)
for i, sentence in enumerate(sentences):
sentence = sentence.permute(0, 3, 1, 2)
sentence = sentence - self.filter(sentence)
for j in range(sentence.shape[0]):
sentence[j] = self.norm(sentence[j])
embeds = self.backbone(sentence)
# print(embeds.shape)
tag_scores_list[i] = embeds.mean(axis=0)
return tag_scores_list
class NetResThr(nn.Module):
def __init__(self, checkpoint_file, model_name, emb_len, num_frames=4 ):
super(NetResThr, self).__init__()
self.backbone, self.input_size = my_initialize_model(checkpoint_file, model_name, False, emb_len)
self.emb_len = emb_len
self.num_frames = num_frames
self.img_in_row = int(np.sqrt(self.num_frames))
self.sz_in_row = int(self.input_size/self.img_in_row)
# self.filter = create_filter(kernel_size = 7, sigma = 3, channels = 3)
self.norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
self.fc = nn.Linear(self.num_frames * emb_len, 2)
def forward(self, sentences):
tag_scores_list = torch.zeros((sentences.shape[0], 2), dtype=torch.float32 ).cuda()
for k, sentence in enumerate(sentences):
sentence = sentence.permute(0, 3, 1, 2)
sentence_generated = torch.zeros(sentence.shape, dtype=torch.float32).cuda()
for frame_out in range(num_frames):
for frame_in in range(num_frames):
for pt_out in range(num_frames):
i_in, j_in = k_to_ij(self.num_frames, frame_out)
i_out, j_out = k_to_ij(self.num_frames, pt_out)
sentence_generated[frame_out,:,i_out*self.sz_in_row:(i_out+1)*self.sz_in_row, j_out*self.sz_in_row:(j_out+1)*self.sz_in_row] = \
sentence[frame_in, :, i_in*self.sz_in_row:(i_in+1)*self.sz_in_row, j_in*self.sz_in_row:(j_in+1)*self.sz_in_row]
sentence_generated[frame_out] = self.norm(sentence_generated[frame_out] )
embeds = self.backbone(sentence_generated)
embeds = torch.flatten(embeds)
embeds = self.fc( embeds )
tag_scores_list[k] = embeds#.mean(axis=0)
return tag_scores_list
# model = NetLstm('/home/kb/Documents/best0.pth', 'resnet', 16, 16)
# model = NetLstm(None, 'resnet', 4, 4)
# model = NetRes(None, 'resnet')
emb_len = 32
model = NetResThr(None, 'resnet', emb_len, num_frames)
input_size = model.input_size
import math
import os  # needed below for os.path.join / os.listdir
import gc
import sys
import time
from pathlib import Path
from functools import partial
from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union
# from tqdm.notebook import tqdm
import cv2
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torchvision
from torch import Tensor
sys.path.insert(0, os.path.join(BASE_DIR, 'src'))
from dataset.utils import read_labels
from prepare_data import get_file_list
def show_images(images, cols = 1, titles = None):
"""Display a list of images in a single figure with matplotlib.
Parameters
---------
images: List of np.arrays compatible with plt.imshow.
cols (Default = 1): Number of columns in figure (number of rows is
set to np.ceil(n_images/float(cols))).
    titles: List of titles corresponding to each image. Must have
            the same length as images.
"""
assert((titles is None)or (len(images) == len(titles)))
n_images = len(images)
if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(cols, int(np.ceil(n_images / float(cols))), n + 1)  # subplot grid sizes must be ints
if image.ndim == 2:
plt.gray()
plt.imshow(image)
a.set_title(title)
fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
plt.show()
def check_len_hdf5(path):
lens = dict()
for name in os.listdir(path):
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
with h5py.File(full_path, 'r+') as f:
lens[name] = len(f)
return lens
def check_len_images(path):
lens = dict()
for name in os.listdir(path):
full_path = os.path.join(path, name)
if os.path.isdir(full_path):
lens[name] = len(os.listdir(full_path))
return lens
def sparse_frames(n: int, total: int) -> np.ndarray:
idxs = np.linspace(0, total, min(n, total), dtype=int, endpoint=False)
rnd_shift = np.random.randint(0, (total - idxs[-1]))
return idxs + rnd_shift
def rnd_slice_frames(n: int, total: int, stride=1.) -> np.ndarray:
idxs = np.arange(0, total, stride)[:n].astype(np.uint16)
rnd_shift = np.random.randint(0, (total - idxs[-1]))
return idxs + rnd_shift
def create_mask(idxs: np.ndarray, total: int) -> np.ndarray:
    mask = np.zeros(total, dtype=bool)  # np.bool is deprecated; use the builtin bool
mask[idxs] = 1
return mask
def pad(frames: np.ndarray, amount: int, where :str='start') -> np.ndarray:
dims = np.zeros((frames.ndim, 2), dtype=np.int8)
pad_dim = 1 if where == 'end' else 0
dims[0, pad_dim] = amount
return np.pad(frames, dims, 'constant')
class FrameSampler():
def __init__(self, num_frames: int, real_fake_ratio: float,
p_sparse: float):
self.num_frames = num_frames
self.real_fake_ratio = real_fake_ratio
self.p_sparse = p_sparse
def __call__(self, label: Tuple[int, bool]) -> Callable[[int], np.ndarray]:
dice = np.random.rand()
if dice < self.p_sparse:
return partial(sparse_frames, self.num_frames)
else:
# Stored frames: fake - 30, real - 150,
# the real_fake_ratio should be set to 150 / 30 = 5
# stride for fake: 5 - (4 * 1) = 1
# stride for real: 5 - (4 * 0) = 5
n = self.real_fake_ratio
stride = n - ((n-1) * int(label))
return partial(rnd_slice_frames, self.num_frames, stride=stride)
# sampler = FrameSampler(num_frames=15, real_fake_ratio=100/30, p_sparse=1.)
class ImagesDataset(torch.utils.data.Dataset):
def __init__(self, base_path: str, size: Tuple[int, int],
sampler: FrameSampler,
sub_dirs: Optional[List[str]]=None):
self.base_path = base_path
self.size = size
self.sampler = sampler
self.df = ImagesDataset._read_annotations(base_path, sub_dirs)
@staticmethod
def _read_annotations(base_path: str,
sub_dirs: Optional[List[str]]) -> pd.DataFrame:
if not os.path.isdir(base_path):
raise RuntimeError('Unable to access %s' % base_path)
parts = []
load_all = sub_dirs is None
if load_all:
sub_dirs = os.listdir(base_path)
for chunk_dir in sub_dirs:
chunk_path = Path(base_path)/chunk_dir
if not chunk_path.is_dir():
if not load_all:
print('Invalid dir: %s' % str(chunk_path))
continue
files = os.listdir(chunk_path)
df = pd.DataFrame(files, columns=['video'])
df['label'] = df['video'].str.endswith('_1')
df['dir'] = chunk_dir
parts.append(df)
if len(parts) < 1:
raise AttributeError('No images were found')
return pd.concat(parts).reset_index()
@staticmethod
def read_image_folder(path: str, num_frames: int, size: int,
sample_fn: Callable[[int], np.ndarray]) -> np.ndarray:
img_size = (size, size)
images = []
files = sorted(os.listdir(path))
total_frames = len(files)
if total_frames > 0:
idxs = sample_fn(total_frames)
pick = create_mask(idxs, total_frames)
for i, file in enumerate(files):
if pick[i]:
img_path = os.path.join(path, file)
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if img.shape[0] > input_size:
img = img[int(img.shape[0]/2)-int(input_size/2):int(img.shape[0]/2)+int(input_size/2),:,:]
if img.shape[1] > input_size:
img = img[:, int(img.shape[1]/2)-int(input_size/2):int(img.shape[1]/2)+int(input_size/2),:]
img = PadIfNeeded(min_height=input_size, min_width=input_size)(image=img)['image']
# img = cv2.resize(img, img_size,
# interpolation=cv2.INTER_NEAREST)
images.append(img)
return np.stack(images)
else:
return np.empty((0, size, size, 3), dtype=np.uint8)
def __len__(self) :
return len(self.df)
def __getitem__(self, idx) -> Tuple[np.ndarray, int]:
num_frames, size = self.size
meta = self.df.iloc[idx]
label = int(meta.label)
path = os.path.join(self.base_path, meta.dir, meta.video)
if os.path.isdir(path):
sample_fn = self.sampler(meta.label)
frames = ImagesDataset.read_image_folder(
path, num_frames, size, sample_fn=sample_fn)
else:
print('Dir not found: {}'.format(path))
frames = np.zeros((num_frames, size, size, 3), dtype=np.uint8)
if len(frames) > 0:
pad_amount = num_frames - len(frames)
if pad_amount > 0:
frames = pad(frames, pad_amount, 'start')
else:
print('Empty file {}'.format(path))
frames = np.zeros((num_frames, size, size, 3), dtype=np.uint8)
frames = np.array(frames, dtype=np.float32)
tr = Compose([
CenterCrop(170, 80),
Resize(input_size, input_size, interpolation=3, p=1),
# RandomResizedCrop(input_size, input_size, scale=(0.2, 0.3),
# ratio=(0.8, 1.2),
# interpolation=3, always_apply=True, p=1.0),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
# ToTensor()
])
frames =np.asarray([tr(image=frame)['image'] for frame in frames ], dtype=np.float32)
# print(frames.shape)
# kernel = np.array([[-1, -1, -1],
# [-1, 8, -1],
# [-1, -1, -1]])
# for i in range(frames.shape[0]):
# for j in range(3):
# frames[i,:,:,j] = ndimage.convolve(frames[i,:,:,j], kernel)
return frames, label
def shuffled_idxs(values: np.ndarray, val: int) -> List[int]:
idxs = (values == val).nonzero()[0]
idxs = np.random.permutation(idxs)
return idxs
class BalancedSampler(torch.utils.data.RandomSampler):
def __init__(self, data_source, replacement=False, num_samples=None):
super().__init__(data_source, replacement, num_samples)
if not hasattr(data_source, 'df'):
raise ValueError("DataSource must have a 'df' property")
if not 'label' in data_source.df:
raise ValueError("DataSource.df must have a 'label' column")
def __iter__(self):
df = self.data_source.df
all_labels = df['label'].values
uniq_labels, label_freq = np.unique(all_labels, return_counts=True)
rev_freq = (len(all_labels) / label_freq)
idxs = []
for freq, label in zip(rev_freq, uniq_labels):
fraction, times = np.modf(freq)
label_idxs = (all_labels == label).nonzero()[0]
for _ in range(int(times)):
label_idxs = np.random.permutation(label_idxs)
idxs.append(label_idxs)
if fraction > 0.05:
label_idxs = np.random.permutation(label_idxs)
chunk = int(len(label_idxs) * fraction)
idxs.append(label_idxs[:chunk])
idxs = np.concatenate(idxs)
idxs = np.random.permutation(idxs)[:self.num_samples]
return iter(idxs.tolist())
def get_loader(num_frames=15, real_fake_ratio=1, p_sparse=0.5, input_size=input_size, img_dir=None, sub_dirs=None):
sampler = FrameSampler(num_frames, real_fake_ratio=real_fake_ratio, p_sparse=p_sparse)
ds = ImagesDataset(img_dir, size=(num_frames, input_size), sampler=sampler,
sub_dirs =sub_dirs)
print(len(ds))
s = BalancedSampler(ds)
batch_sampler = torch.utils.data.BatchSampler(
BalancedSampler(ds),
batch_size=batch_size,
drop_last=True
)
dl = torch.utils.data.DataLoader(ds, batch_sampler=batch_sampler)
return dl
loaders = {}
loaders['train'] = get_loader(num_frames=num_frames, real_fake_ratio=100/30, p_sparse=1.0, input_size=input_size,
img_dir='/home/kb/projects/dfdc/data/dfdc-crops/webp',
sub_dirs= ['dfdc_train_part_%d' % i for i in [1,5,10,15,20,25,30,35]]
)
loaders['valid'] = get_loader(num_frames=num_frames, real_fake_ratio=100/30, p_sparse=1.0, input_size=input_size,
img_dir='/home/kb/projects/dfdc/data/dfdc-crops/webp',
sub_dirs= ['dfdc_train_part_%d' % i for i in range(40,50)]
)
# loaders['test'] = get_loader(num_frames=num_frames, real_fake_ratio=100/30, p_sparse=1.0, input_size=input_size,
# img_dir='/home/kb/projects/dfdc/data/dfdc-crops/webp')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
project = 'dfdc_v2_resnet'
num_epochs = 25
group = datetime.now().strftime("%m_%d_%Y__%H_%M_%S")
if FASTPART:
group = f'fast_{group}'
expnum = 0
experiment = f"exp{expnum}"
logdir = f"/home/kb/hdd/logs/deepfake/{project}/{group}/{experiment}"
model = model.to(device)
params_to_update = model.parameters()
if feature_extract:
params_to_update = []
for name,param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
else:
for name,param in model.named_parameters():
if param.requires_grad == True:
pass
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.AdamW(params=params_to_update, lr=0.00001)  # use the parameter list selected above
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
# model runner
runner = SupervisedRunner()
print(f'----------------Experiment: {experiment}')
logger = Logger(
token=token,
experiment=experiment,
group=group,
project=project,
)
logger.close()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir=logdir,
num_epochs=num_epochs,
verbose=True,
monitoring_params={
"token": token,
"project": project,
"experiment": experiment,
"group": group,
}
)
# num_frames = num_frames
# img_in_row = int(np.sqrt(num_frames))
# sz_in_row = int(input_size/img_in_row)
# for sentences, labels in loaders['train']:
# for k, sentence in enumerate(sentences):
# print(f'showing {k}-th video')
# sentence = sentence.permute(0, 3, 1, 2)
# sentence_generated = torch.zeros(sentence.shape, dtype=torch.float32).cuda()
# for frame_out in range(num_frames):
# for frame_in in range(num_frames):
# for pt_out in range(num_frames):
# i_in, j_in = k_to_ij(num_frames, frame_out)
# i_out, j_out = k_to_ij(num_frames, pt_out)
# sentence_generated[frame_out,:,i_out*sz_in_row:(i_out+1)*sz_in_row, j_out*sz_in_row:(j_out+1)*sz_in_row] = \
# sentence[frame_in, :, i_in*sz_in_row:(i_in+1)*sz_in_row, j_in*sz_in_row:(j_in+1)*sz_in_row]
# for j in range(sentence_generated.shape[0]):
# print(f'---showing {j}-th frame')
# plt.figure()
# img = sentence_generated[j,:,:].permute(1,2,0).cpu().numpy()
# img -= img.min()
# img /= img.max() / 255.
# img = np.array(img, dtype=np.uint8)
# print(f'max {img[j,:,:].max()}, min {img[j,:,:].min()}')
# plt.imshow( img)
# plt.show()
# break
# break
# sentence_generated.shape
# sentence_generated[0].max()
# sentence[0].min()
```
| github_jupyter |
# Getting started in scikit-learn with the famous iris dataset
*From the video series: [Introduction to machine learning with scikit-learn](https://github.com/justmarkham/scikit-learn-videos)*
```
#environment setup with watermark
%load_ext watermark
%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,matplotlib,nltk,sklearn,tensorflow,theano,mxnet,chainer
```
## Agenda
- What is the famous iris dataset, and how does it relate to machine learning?
- How do we load the iris dataset into scikit-learn?
- How do we describe a dataset using machine learning terminology?
- What are scikit-learn's four key requirements for working with data?
## Introducing the iris dataset

- 50 samples of 3 different species of iris (150 samples total)
- Measurements: sepal length, sepal width, petal length, petal width
```
from IPython.display import IFrame
IFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200)
```
## Machine learning on the iris dataset
- Framed as a **supervised learning** problem: Predict the species of an iris using the measurements
- Famous dataset for machine learning because prediction is **easy**
- Learn more about the iris dataset: [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/Iris)
## Loading the iris dataset into scikit-learn
```
# import load_iris function from datasets module
from sklearn.datasets import load_iris
# save "bunch" object containing iris dataset and its attributes
iris = load_iris()
type(iris)
# print the iris data
print(iris.data)
```
## Machine learning terminology
- Each row is an **observation** (also known as: sample, example, instance, record)
- Each column is a **feature** (also known as: predictor, attribute, independent variable, input, regressor, covariate)
```
# print the names of the four features
print(iris.feature_names)
# print integers representing the species of each observation
print(iris.target)
# print the encoding scheme for species: 0 = setosa, 1 = versicolor, 2 = virginica
print(iris.target_names)
```
- Each value we are predicting is the **response** (also known as: target, outcome, label, dependent variable)
- **Classification** is supervised learning in which the response is categorical
- **Regression** is supervised learning in which the response is ordered and continuous
## Requirements for working with data in scikit-learn
1. Features and response are **separate objects**
2. Features and response should be **numeric**
3. Features and response should be **NumPy arrays**
4. Features and response should have **specific shapes**
```
# check the types of the features and response
print(type(iris.data))
print(type(iris.target))
# check the shape of the features (first dimension = number of observations, second dimension = number of features)
print(iris.data.shape)
# check the shape of the response (single dimension matching the number of observations)
print(iris.target.shape)
# store feature matrix in "X"
X = iris.data
# store response vector in "y"
y = iris.target
```
## Resources
- scikit-learn documentation: [Dataset loading utilities](http://scikit-learn.org/stable/datasets/)
- Jake VanderPlas: Fast Numerical Computing with NumPy ([slides](https://speakerdeck.com/jakevdp/losing-your-loops-fast-numerical-computing-with-numpy-pycon-2015), [video](https://www.youtube.com/watch?v=EEUXKG97YRw))
- Scott Shell: [An Introduction to NumPy](http://www.engr.ucsb.edu/~shell/che210d/numpy.pdf) (PDF)
## Comments or Questions?
- Email: <[email protected]>
- Website: http://dataschool.io
- Twitter: [@justmarkham](https://twitter.com/justmarkham)
```
from IPython.core.display import HTML
def css_styling():
styles = open("styles/custom.css", "r").read()
return HTML(styles)
css_styling()
```
| github_jupyter |
# How to Use Forecasters in Merlion
This notebook will guide you through using all the key features of forecasters in Merlion. Specifically, we will explain
1. Initializing a forecasting model (including ensembles and automatic model selectors)
1. Training the model
1. Producing a forecast with the model
1. Visualizing the model's predictions
1. Quantitatively evaluating the model
1. Saving and loading a trained model
1. Simulating the live deployment of a model using a `ForecastEvaluator`
We will be using a single example time series for this whole notebook. We load it now:
```
import matplotlib.pyplot as plt
import numpy as np
from merlion.utils.time_series import TimeSeries
from ts_datasets.forecast import M4
# Load the time series
# time_series is a time-indexed pandas.DataFrame
# trainval is a time-indexed pandas.Series indicating whether each timestamp is for training or testing
time_series, metadata = M4(subset="Hourly")[5]
trainval = metadata["trainval"]
# Is there any missing data?
timedeltas = np.diff(time_series.index)
print(f"Has missing data: {any(timedeltas != timedeltas[0])}")
# Visualize the time series and draw a dotted line to indicate the train/test split
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.plot(time_series)
ax.axvline(time_series[trainval].index[-1], ls="--", lw="2", c="k")
plt.show()
# Split the time series into train/test splits, and convert it to Merlion format
train_data = TimeSeries.from_pd(time_series[trainval])
test_data = TimeSeries.from_pd(time_series[~trainval])
print(f"{len(train_data)} points in train split, "
f"{len(test_data)} points in test split.")
```
## Model Initialization
In this notebook, we will use three different forecasting models:
1. ARIMA (a classic stochastic process model)
2. Prophet (Facebook's popular time series forecasting model)
3. MSES (the Multi-Scale Exponential Smoothing model, developed in-house)
Let's start by initializing each of them.
```
# Import models & configs
from merlion.models.forecast.arima import Arima, ArimaConfig
from merlion.models.forecast.prophet import Prophet, ProphetConfig
from merlion.models.forecast.smoother import MSES, MSESConfig
# Import data pre-processing transforms
from merlion.transform.base import Identity
from merlion.transform.resample import TemporalResample
# All models are initialized using the syntax ModelClass(config),
# where config is a model-specific configuration object. This is where
# you specify any algorithm-specific hyperparameters, as well as any
# data pre-processing transforms.
# ARIMA assumes that input data is sampled at a regular interval,
# so we set its transform to resample at that interval. We must also specify
# a maximum prediction horizon.
config1 = ArimaConfig(max_forecast_steps=100, order=(20, 1, 5),
transform=TemporalResample(granularity="1h"))
model1 = Arima(config1)
# Prophet has no real assumptions on the input data (and doesn't require
# a maximum prediction horizon), so we skip data pre-processing by using
# the Identity transform.
config2 = ProphetConfig(max_forecast_steps=None, transform=Identity())
model2 = Prophet(config2)
# MSES assumes that the input data is sampled at a regular interval,
# and requires us to specify a maximum prediction horizon. We will
# also specify its look-back hyperparameter to be 60 here
config3 = MSESConfig(max_forecast_steps=100, max_backstep=60,
transform=TemporalResample(granularity="1h"))
model3 = MSES(config3)
```
Now that we have initialized the individual models, we will also combine them in two different ensembles: `ensemble` simply takes the mean prediction of each individual model, and `selector` selects the best individual model based on its sMAPE (symmetric Mean Absolute Percentage Error). The sMAPE is a metric used to evaluate the quality of a continuous forecast. For ground truth $y \in \mathbb{R}^T$ and prediction $\hat{y} \in \mathbb{R}^T$, the sMAPE is computed as
$$
\mathrm{sMAPE}(y, \hat{y}) = \frac{200}{T} \sum_{t = 1}^{T} \frac{\lvert \hat{y}_t - y_t \rvert}{\lvert\hat{y}_t\rvert + \lvert y_t \rvert}
$$
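To make the metric concrete, here is a plain-NumPy version of the formula above. This is only an illustrative sketch (the `smape` helper below is not part of Merlion); in the rest of the notebook we rely on Merlion's own `ForecastMetric.sMAPE`.
```
import numpy as np

def smape(y_true, y_pred):
    """Plain NumPy version of the sMAPE formula above (scale is 0 to 200)."""
    y_true, y_pred = np.asarray(y_true, dtype=float), np.asarray(y_pred, dtype=float)
    return 200.0 / len(y_true) * np.sum(np.abs(y_pred - y_true) / (np.abs(y_pred) + np.abs(y_true)))

# A forecast that overshoots by 10% everywhere gives a sMAPE of about 9.5
y = np.array([100.0, 200.0, 300.0])
print(smape(y, 1.1 * y))
```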
```
from merlion.evaluate.forecast import ForecastMetric
from merlion.models.ensemble.combine import Mean, ModelSelector
from merlion.models.ensemble.forecast import ForecasterEnsemble, ForecasterEnsembleConfig
# The ForecasterEnsemble is a forecaster, and we treat it as a first-class model.
# Its config takes a combiner object, specifying how you want to combine the
# predictions of individual models in the ensemble. There are two ways to specify
# the actual models in the ensemble, which we cover below.
# The first way to specify the models in the ensemble is to provide their individual
# configs when initializing the ForecasterEnsembleConfig. Note that if using this
# syntax, you must also provide the names of the model classes.
#
# The combiner here will simply take the mean prediction of the ensembles here
ensemble_config = ForecasterEnsembleConfig(
combiner=Mean(),
model_configs=[(type(model1).__name__, config1),
(type(model2).__name__, config2),
(type(model3).__name__, config3)])
ensemble = ForecasterEnsemble(config=ensemble_config)
# Alternatively, you can skip giving the individual model configs to the
# ForecasterEnsembleConfig, and instead directly specify the models when
# initializing the ForecasterEnsemble itself.
#
# The combiner here uses the sMAPE to compare individual models, and
# selects the model with the lowest sMAPE
selector_config = ForecasterEnsembleConfig(
combiner=ModelSelector(metric=ForecastMetric.sMAPE))
selector = ForecasterEnsemble(
config=selector_config, models=[model1, model2, model3])
```
## Model Training
All forecasting models (and ensembles) share the same API for training. The `train()` method returns the model's predictions and standard error of those predictions on the training data. Note that the standard error is just `None` if the model doesn't support uncertainty estimation (this is the case for MSES and ensembles).
```
print(f"Training {type(model1).__name__}...")
forecast1, stderr1 = model1.train(train_data)
print(f"\nTraining {type(model2).__name__}...")
forecast2, stderr2 = model2.train(train_data)
print(f"\nTraining {type(model3).__name__}...")
forecast3, stderr3 = model3.train(train_data)
print("\nTraining ensemble...")
forecast_e, stderr_e = ensemble.train(train_data)
print("\nTraining model selector...")
forecast_s, stderr_s = selector.train(train_data)
print("Done!")
```
## Model Inference
To obtain a forecast from a trained model, we simply call `model.forecast()` with the Unix timestamps at which we want the model to generate a forecast. In many cases, you may obtain these directly from a time series as shown below.
```
# Truncate the test data to ensure that we are within each model's maximum
# forecast horizon.
sub_test_data = test_data[:50]
# Obtain the time stamps corresponding to the test data
time_stamps = sub_test_data.univariates[sub_test_data.names[0]].time_stamps
# Get the forecast & standard error of each model. These are both
# merlion.utils.TimeSeries objects. Note that the standard error is None for
# models which don't support uncertainty estimation (like MSES and all
# ensembles).
forecast1, stderr1 = model1.forecast(time_stamps=time_stamps)
forecast2, stderr2 = model2.forecast(time_stamps=time_stamps)
# You may optionally specify a time series prefix as context. If one isn't
# specified, the prefix is assumed to be the training data. Here, we just make
# this dependence explicit. More generally, this feature is useful if you want
# to use a pre-trained model to make predictions on data further in the future
# from the last time it was trained.
forecast3, stderr3 = model3.forecast(time_stamps=time_stamps, time_series_prev=train_data)
# The same options are available for ensembles as well, though the stderr is None
forecast_e, stderr_e = ensemble.forecast(time_stamps=time_stamps)
forecast_s, stderr_s = selector.forecast(time_stamps=time_stamps, time_series_prev=train_data)
```
## Model Visualization and Quantitative Evaluation
It is straightforward to visualize a model's forecast and to evaluate it quantitatively, using standard metrics like sMAPE. We show examples for all five models below.
Below, we quantitatively evaluate the models using the sMAPE metric. However, the `ForecastMetric` enum includes a number of other options as well. In general, you may use the syntax
```
ForecastMetric.<metric_name>.value(ground_truth=ground_truth, predict=forecast)
```
where `<metric_name>` is the name of the evaluation metric (see the API docs for details and more options), `ground_truth` is the original time series, and `forecast` is the forecast returned by the model. We show concrete examples with `ForecastMetric.sMAPE` below.
```
from merlion.evaluate.forecast import ForecastMetric
# We begin by computing the sMAPE of ARIMA's forecast (scale is 0 to 100)
smape1 = ForecastMetric.sMAPE.value(ground_truth=sub_test_data,
predict=forecast1)
print(f"{type(model1).__name__} sMAPE is {smape1:.3f}")
# Next, we can visualize the actual forecast, and understand why it
# attains this particular sMAPE. Since ARIMA supports uncertainty
# estimation, we plot its error bars too.
fig, ax = model1.plot_forecast(time_series=sub_test_data,
plot_forecast_uncertainty=True)
plt.show()
# We begin by computing the sMAPE of Prophet's forecast (scale is 0 to 100)
smape2 = ForecastMetric.sMAPE.value(sub_test_data, forecast2)
print(f"{type(model2).__name__} sMAPE is {smape2:.3f}")
# Next, we can visualize the actual forecast, and understand why it
# attains this particular sMAPE. Since Prophet supports uncertainty
# estimation, we plot its error bars too.
# Note that we can specify time_series_prev here as well, though it
# will not be visualized unless we also supply the keyword argument
# plot_time_series_prev=True.
fig, ax = model2.plot_forecast(time_series=sub_test_data,
time_series_prev=train_data,
plot_forecast_uncertainty=True)
plt.show()
# We begin by computing the sMAPE of MSES's forecast (scale is 0 to 100)
smape3 = ForecastMetric.sMAPE.value(sub_test_data, forecast3)
print(f"{type(model3).__name__} sMAPE is {smape3:.3f}")
# Next, we visualize the actual forecast, and understand why it
# attains this particular sMAPE.
fig, ax = model3.plot_forecast(time_series=sub_test_data,
plot_forecast_uncertainty=True)
plt.show()
# Compute the sMAPE of the ensemble's forecast (scale is 0 to 100)
smape_e = ForecastMetric.sMAPE.value(sub_test_data, forecast_e)
print(f"Ensemble sMAPE is {smape_e:.3f}")
# Visualize the forecast.
fig, ax = ensemble.plot_forecast(time_series=sub_test_data,
plot_forecast_uncertainty=True)
plt.show()
# Compute the sMAPE of the selector's forecast (scale is 0 to 100)
smape_s = ForecastMetric.sMAPE.value(sub_test_data, forecast_s)
print(f"Selector sMAPE is {smape_s:.3f}")
# Visualize the forecast.
fig, ax = selector.plot_forecast(time_series=sub_test_data,
plot_forecast_uncertainty=True)
plt.show()
```
## Saving & Loading Models
All models have a `save()` method and `load()` class method. Models may also be loaded with the assistance of the `ModelFactory`, which works for arbitrary models. The `save()` method creates a new directory at the specified path, where it saves a `json` file representing the model's config, as well as a binary file for the model's state.
We will demonstrate these behaviors using our `Prophet` model (`model2`) for concreteness.
```
import json
import os
import pprint
from merlion.models.factory import ModelFactory
# Save the model
os.makedirs("models", exist_ok=True)
path = os.path.join("models", "prophet")
model2.save(path)
# Print the config saved
pp = pprint.PrettyPrinter()
with open(os.path.join(path, "config.json")) as f:
print(f"{type(model2).__name__} Config")
pp.pprint(json.load(f))
# Load the model using Prophet.load()
model2_loaded = Prophet.load(dirname=path)
# Load the model using the ModelFactory
model2_factory_loaded = ModelFactory.load(name="Prophet", model_path=path)
```
We can do the same exact thing with ensembles! Note that the ensemble saves each of its sub-models in a different sub-directory, which it tracks manually. Additionally, the combiner (which is saved in the `ForecasterEnsembleConfig`), keeps track of the sMAPE achieved by each model (the `metric_values` key).
```
# Save the selector
path = os.path.join("models", "selector")
selector.save(path)
# Print the config saved. Note that we've saved all individual models,
# and their paths are specified under the model_paths key.
pp = pprint.PrettyPrinter()
with open(os.path.join(path, "config.json")) as f:
print(f"Selector Config")
pp.pprint(json.load(f))
# Load the selector
selector_loaded = ForecasterEnsemble.load(dirname=path)
# Load the selector using the ModelFactory
selector_factory_loaded = ModelFactory.load(name="ForecasterEnsemble", model_path=path)
```
## Simulating Live Model Deployment
A typical model deployment scenario is as follows:
1. Train an initial model on some recent historical data
1. At a regular interval `cadence`, obtain the model's forecast for a certain `horizon`
1. At a regular interval `retrain_freq`, retrain the entire model on the most recent data
1. Optionally, specify a maximum amount of data (`train_window`) that the model should use for training
We provide a `ForecastEvaluator` object which simulates the above deployment scenario, and also allows a user to evaluate the quality of the forecaster according to an evaluation metric of their choice. We illustrate two examples below, using ARIMA for the first example, and the model selector for the second.
```
from merlion.evaluate.forecast import ForecastEvaluator, ForecastEvaluatorConfig, ForecastMetric
def create_evaluator(model):
# Re-initialize the model, so we can re-train it from scratch
model.reset()
# Create an evaluation pipeline for the model, where we
# -- get the model's forecast every hour
# -- have the model forecast for a horizon of 6 hours
# -- re-train the model every 12 hours
# -- when we re-train the model, retrain it on only the past 2 weeks of data
evaluator = ForecastEvaluator(
model=model, config=ForecastEvaluatorConfig(
cadence="1h", horizon="6h", retrain_freq="12h", train_window="14d")
)
return evaluator
```
First, let's evaluate ARIMA.
```
# Obtain the results of running the evaluation pipeline for ARIMA.
# These result objects are to be treated as a black box, and should be
# passed directly to the evaluator's evaluate() method.
model1_evaluator = create_evaluator(model1)
model1_train_result, model1_test_result = model1_evaluator.get_predict(
train_vals=train_data, test_vals=test_data)
# Evaluate ARIMA's sMAPE and RMSE
smape = model1_evaluator.evaluate(
ground_truth=test_data,
predict=model1_test_result,
metric=ForecastMetric.sMAPE)
rmse = model1_evaluator.evaluate(
ground_truth=test_data,
predict=model1_test_result,
metric=ForecastMetric.RMSE)
print(f"{type(model1).__name__} sMAPE: {smape:.3f}")
print(f"{type(model1).__name__} RMSE: {rmse:.3f}")
```
Next, we will evaluate the ensemble (taking the mean prediction of ARIMA, Prophet, and MSES every time the models are called).
```
# Obtain the results of running the evaluation pipeline for the ensemble.
# These result objects are to be treated as a black box, and should be
# passed directly to the evaluator's evaluate() method.
ensemble_evaluator = create_evaluator(ensemble)
ensemble_train_result, ensemble_test_result = ensemble_evaluator.get_predict(
train_vals=train_data, test_vals=test_data)
# Evaluate the ensemble's sMAPE and RMSE
smape = ensemble_evaluator.evaluate(
ground_truth=test_data,
predict=ensemble_test_result,
metric=ForecastMetric.sMAPE)
rmse = ensemble_evaluator.evaluate(
ground_truth=test_data,
predict=ensemble_test_result,
metric=ForecastMetric.RMSE)
print(f"Ensemble sMAPE: {smape:.3f}")
print(f"Ensemble RMSE: {rmse:.3f}")
```
| github_jupyter |
# Strings
```
name = "Robin"
```
## Multi line strings
```
paragraph = "I am thinking of writing something that spans"\
"multiple lines and Nobody is helping me with that. So here"\
"is me typing something random"
print(paragraph)
# \n represents Newline
paragraph = "I am thinking of writing something that spans\n\
multiple lines and Nobody is helping me with that. So here\n\
is me typing something random"
print(paragraph)
```
## String indices
```
sample_string = "Sorry Madam"
# Subscript operator : []
sample_string[1] # character at index 1 of sample_string
sample_string[2]
'''
*******************************************
Example of a multi-line comment:
To access the first character of the string
you need to use the index 0
*******************************************
'''
sample_string[0]
'''
To access a part of string, use a colon notation in the
subscript operator []
'''
sample_string[0:5]
# give me the string madam from the sample_string
sample_string[6:11]
# Slice the string from index 6 and go until the end
sample_string[6:]
# give me string "Sorry" without writing 0 as index
sample_string[:5]
print(sample_string)
# Negative index: -1 will access the last element
print(sample_string[-1])
# access first element with negative index
print (sample_string[-11])
# This index is invalid
print (sample_string[-12])
sample_string[11]
# Python tries to slice the string
# by reading from left to right
# Indices in the statement below are wrong
sample_string[-4:-10]
sample_string[-10:-4]
sample_string[0:5]
'''
Slice the string from index 0 to 4
with the jump of 2
'''
sample_string[0:5:2]
sample_string[-5:0] # This will not work
sample_string[-5:] # will give you the desired result
sample_string2 = "I love Python"
# Slice this string and give me every third character
# Expected output : "Io tn"
# Pythonic
print(sample_string2[0::3])
print(sample_string2[::3]) # most pythonic
print(sample_string2[0:14:3])
print(sample_string2[0:15:3])
num1 = "5"
num2 = "3"
print(num1+ num2)
sample_string2
print(sample_string2[0]+sample_string2[7:14])
print(sample_string2[0]+ sample_string2[2]+sample_string2[7:14])
print(sample_string, sample_string2)
print(sample_string + sample_string2)
print(sample_string + "!! "+ sample_string2)
# to convert a string into lower case characters
sample_string.lower()
sample_string.upper()
sample_string.count()  # raises a TypeError: count() expects a substring argument (see help below)
type(sample_string)
help(str.count)
sample_string
sample_string.count('a')
fruit = "banana"
#it has overlapping word ana
fruit.count('ana')
sample_string.count('r',0,3)
sample_string
# Find length of the string
# i.e. number of characters in the string
len(sample_string)
help(len)
name = "Jeroen"
age = 27
country = "Netherlands"
print("Hoi, I am {}. I am {} years old.I come from {}".format(name,age, country) )
fruit
fruit2="guanabana"
fruit == 'banana'
is_it_raining = False
```
### Conditional operators
```
== : Compare two expressions for equality
!= : compare for inequality
< : compare less than
> : greater than
<= : less than or equal to
>= : greater than or equal to
```
```
fruit == 'banana'
fruit != 'orange'
print("fruit =", fruit)
print("fruit2 =", fruit2)
fruit[0:4] == fruit2[5:9]
```
### Conditional statements
```
it_is_raining = False
it_is_sunny = not it_is_raining
if it_is_sunny:
print("I will go swimming in Sloterplas")
else:
print("I will work on Python (coding)")
it_is_raining = True
it_is_sunny = not it_is_raining
if it_is_sunny:
print("I will go swimming in Sloterplas")
print("I will run")
else:
print("I will work on Python (coding)")
# Accept a number from user (input)
# If the number is even, print "Hurray"
# Else print "Meah"
number = int(input("Enter a number : "))
if number%2 == 0:
print ("Hurray")
else:
print("Meah")
x = 3 # Assignment
print(x)
print(x%2)
time = float(input("Enter a number between 0 and 23"))
if time >= 0 and time <= 8:
print("I am asleep")
elif time >8 and time <= 10:
print("Morning rituals")
elif time > 10 and time <= 13:
print("I am Pythoning")
elif time >13 and time <= 14:
print("I am lunching")
elif time >14 and time < 17:
print("I am researching")
else:
print("I am having fun")
```
### Loops
```
# Not so smart way of printing Hello 5 times
print("Hello")
print("Hello")
print("Hello")
print("Hello")
print("Hello")
# Smart way of printing Hello 5 times
for i in range(5):
print("Hello")
for i in range(5):
print(i)
for i in range(1,6):
print(i)
for u in range(1,6):
print(u, ")", "Hello")
sample_string
'''
a way of accessing individual characters in string
by index
'''
some_number = 15
for i in range(len(sample_string)):
print("[",str(i),"]:", sample_string[i], some_number)
```
```
i = 0
print("[",str(i),"]:", sample_string[0], some_number)
i = 1
print("[",str(i),"]:", sample_string[1], some_number)
i = 2
print("[",str(i),"]:", sample_string[2], some_number)
i = 3
print("[",str(i),"]:", sample_string[3], some_number)
...
...
i = 10
print("[",str(i),"]:", sample_string[10], some_number)
```
```
len(sample_string)
```
```
n = input()
n= 12
12
24
36
48
60
72
84
96
108
120
n = 4
4
8
12
16
20
24
.
40
```
```
n = int(input())
for i in range(1,11):
print(i*n)
```
| github_jupyter |
###Set up working directory
```
cd /usr/local/notebooks
mkdir -p ./workdir
#check seqfile files to process in data directory (make sure you still remember the data directory)
!ls ./data/test/data
```
#README
## This part of the pipeline searches for SSU rRNA gene fragments, classifies them, and extracts reads aligned to a specific region. It is also the heavy-lifting part of the whole pipeline (more CPUs will help).
## This part works with one seqfile at a time. You just need to change the "Seqfile" and maybe other parameters in the two cells below.
## To run the commands, click "Cell" then "Run All". After it finishes, you will see "\*** pipeline runs successfully :)" at the bottom of this page.
## If your computer has many processors, there are two ways to make use of the resources:
1. Set "Cpu" to a higher number.
2. Make more copies of this notebook (click "File" then "Make a copy" in the menu bar), so you can run this step on multiple files at the same time.
(Again we assume the "Seqfile" is quality trimmed.)
###Here we will process one file at a time; set the "Seqfile" variable to the seqfile name to be processed
###First part of the seqfile basename (separated by ".") will be the label of this sample, so name it properly.
e.g. for "/usr/local/notebooks/data/test/data/1c.fa", "1c" will be the label of this sample.
```
Seqfile='./data/test/data/2d.fa'
```
###Other parameters to set
```
Cpu='2' # maximum number of threads for search and alignment
Hmm='./data/SSUsearch_db/Hmm.ssu.hmm' # hmm model for ssu
Gene='ssu'
Script_dir='./SSUsearch/scripts'
Gene_model_org='./data/SSUsearch_db/Gene_model_org.16s_ecoli_J01695.fasta'
Ali_template='./data/SSUsearch_db/Ali_template.silva_ssu.fasta'
Start='577' #pick regions for de novo clustering
End='727'
Len_cutoff='100' # min length for reads picked for the region
Gene_tax='./data/SSUsearch_db/Gene_tax.silva_taxa_family.tax' # silva 108 ref
Gene_db='./data/SSUsearch_db/Gene_db.silva_108_rep_set.fasta'
Gene_tax_cc='./data/SSUsearch_db/Gene_tax_cc.greengene_97_otus.tax' # greengene 2012.10 ref for copy correction
Gene_db_cc='./data/SSUsearch_db/Gene_db_cc.greengene_97_otus.fasta'
# first part of file basename will the label of this sample
import os
Filename=os.path.basename(Seqfile)
Tag=Filename.split('.')[0]
import os
Hmm=os.path.abspath(Hmm)
Seqfile=os.path.abspath(Seqfile)
Script_dir=os.path.abspath(Script_dir)
Gene_model_org=os.path.abspath(Gene_model_org)
Ali_template=os.path.abspath(Ali_template)
Gene_tax=os.path.abspath(Gene_tax)
Gene_db=os.path.abspath(Gene_db)
Gene_tax_cc=os.path.abspath(Gene_tax_cc)
Gene_db_cc=os.path.abspath(Gene_db_cc)
os.environ.update(
{'Cpu':Cpu,
'Hmm':os.path.abspath(Hmm),
'Gene':Gene,
'Seqfile':os.path.abspath(Seqfile),
'Filename':Filename,
'Tag':Tag,
'Script_dir':os.path.abspath(Script_dir),
'Gene_model_org':os.path.abspath(Gene_model_org),
'Ali_template':os.path.abspath(Ali_template),
'Start':Start,
'End':End,
'Len_cutoff':Len_cutoff,
'Gene_tax':os.path.abspath(Gene_tax),
'Gene_db':os.path.abspath(Gene_db),
'Gene_tax_cc':os.path.abspath(Gene_tax_cc),
'Gene_db_cc':os.path.abspath(Gene_db_cc)})
!echo "*** make sure: parameters are right"
!echo "Seqfile: $Seqfile\nCpu: $Cpu\nFilename: $Filename\nTag: $Tag"
cd workdir
mkdir -p $Tag.ssu.out
### start hmmsearch
!echo "*** hmmsearch starting"
!time hmmsearch --incE 10 --incdomE 10 --cpu $Cpu \
--domtblout $Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \
-o /dev/null -A $Tag.ssu.out/$Tag.qc.$Gene.sto \
$Hmm $Seqfile
!echo "*** hmmsearch finished"
!python $Script_dir/get-seq-from-hmmout.py \
$Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \
$Tag.ssu.out/$Tag.qc.$Gene.sto \
$Tag.ssu.out/$Tag.qc.$Gene
```
### Pass hits to mothur aligner
```
!echo "*** Starting mothur align"
!cat $Gene_model_org $Tag.ssu.out/$Tag.qc.$Gene > $Tag.ssu.out/$Tag.qc.$Gene.RFadded
# mothur does not allow tab between its flags, thus no indents here
!time mothur "#align.seqs(candidate=$Tag.ssu.out/$Tag.qc.$Gene.RFadded, template=$Ali_template, threshold=0.5, flip=t, processors=$Cpu)"
!rm -f mothur.*.logfile
```
### Get aligned seqs that have > 50% matched to references
```
!python $Script_dir/mothur-align-report-parser-cutoff.py \
$Tag.ssu.out/$Tag.qc.$Gene.align.report \
$Tag.ssu.out/$Tag.qc.$Gene.align \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter \
0.5
!python $Script_dir/remove-gap.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa
```
### Search is done here (the computationally intensive part). Hooray!
- \$Tag.ssu.out/\$Tag.qc.\$Gene.align.filter:
aligned SSU rRNA gene fragments
- \$Tag.ssu.out/\$Tag.qc.\$Gene.align.filter.fa:
unaligned SSU rRNA gene fragments
### Extract the reads mapped to the 150 bp region in V4 (positions 577-727 in the *E. coli* SSU rRNA gene) for unsupervised clustering
```
!python $Script_dir/region-cut.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Start $End $Len_cutoff
!mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter."$Start"to"$End".cut.lenscreen $Tag.ssu.out/$Tag.forclust
```
### Classify SSU rRNA gene seqs using SILVA
```
!rm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.*.wang.taxonomy
!mothur "#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db, taxonomy=$Gene_tax, cutoff=50, processors=$Cpu)"
!mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.*.wang.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy
!python $Script_dir/count-taxon.py \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy.count
!rm -f mothur.*.logfile
```
### Classify SSU rRNA gene seqs with Greengene for copy correction later
```
!rm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.*.wang.taxonomy
!mothur "#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db_cc, taxonomy=$Gene_tax_cc, cutoff=50, processors=$Cpu)"
!mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.*.wang.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy
!python $Script_dir/count-taxon.py \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy \
$Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy.count
!rm -f mothur.*.logfile
# check the output directory
!ls $Tag.ssu.out
```
### This part of the pipeline (working with one sequence file) finishes here. Next we will combine samples for community analysis (see unsupervised analysis).
Following are files useful for community analysis:
* 1c.577to727: aligned fasta file of seqs mapped to target region for de novo clustering
* 1c.qc.ssu.align.filter: aligned fasta file of all SSU rRNA gene fragments
* 1c.qc.ssu.align.filter.wang.gg.taxonomy: Greengene taxonomy (for copy correction)
* 1c.qc.ssu.align.filter.wang.silva.taxonomy: SILVA taxonomy
```
!echo "*** pipeline runs successfully :)"
```
| github_jupyter |
# Classification and Regression
There are two major types of supervised machine learning problems, called *classification* and *regression*.
In classification, the goal is to predict a *class label*, which is a choice from a predefined list of possibilities. In *Intro_to_Decision_Trees.ipynb* we used the example of classifying irises into one of three possible species. Classification is sometimes separated into binary classification, which is the special case of distinguishing between exactly two classes, and multiclass classification, which is classification between more than two classes. You can think of binary classification as trying to answer a yes/no question. Classifying emails as either spam or not spam is an example of a binary classification problem. In this binary classification task, the yes/no question being asked would be “Is this email spam?”
For regression tasks, the goal is to predict a *continuous number*, or a floating-point number in programming terms (or real number in mathematical terms). Predicting a person’s annual income from their education, their age, and where they live is an example of a regression task. When predicting income, the predicted value is an amount, and can be any number in a given range. Another example of a regression task is predicting the yield of a corn farm given attributes such as previous yields, weather, and number of employees working on the farm. The yield again can be an arbitrary number.
**An easy way to distinguish between classification and regression tasks is to ask whether there is some kind of continuity in the output. If there is continuity between possible outcomes, then the problem is a regression problem.** Think about predicting annual income. There is a clear continuity in the output. Whether a person makes $40,000 or $40,001 a year does not make a tangible difference, even though these are different amounts of money; if our algorithm predicts $39,999 or $40,001 when it should have predicted $40,000, we don’t mind that much.
By contrast, for the task of recognizing the language of a website (which is a classification problem), there is no matter of degree. A website is in one language, or it is in another. There is no continuity between languages, and there is no language that is between English and French.
*Disclaimer*: Much of the code in this notebook was lifted from the excellent book [Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do) by Andreas Muller and Sarah Guido.
# Generalization, Overfitting, and Underfitting
In supervised learning, we want to build a model on the training data and then be able to make accurate predictions on new, unseen data that has the same characteristics as the training set that we used. If a model is able to make accurate predictions on unseen data, we say it is able to *generalize* from the training set to the test set. We want to build a model that is able to generalize as accurately as possible.
Usually we build a model in such a way that it can make accurate predictions on the training set. If the training and test sets have enough in common, we expect the model to also be accurate on the test set. However, there are some cases where this can go wrong. For example, if we allow ourselves to build very complex models, we can always be as accurate as we like on the training set.
The only measure of whether an algorithm will perform well on new data is the evaluation on the test set. However, intuitively we expect simple models to generalize better to new data. Therefore, we always want to find the simplest model. Building a model that is too complex for the amount of information we have is called *overfitting*. Overfitting occurs when you fit a model too closely to the particularities of the training set and obtain a model that works well on the training set but is not able to generalize to new data. On the other hand, if your model is too simple, then you might not be able to capture all the aspects of and variability in the data, and your model will do badly even on the training set. Choosing too simple a model is called *underfitting*.
The more complex we allow our model to be, the better we will be able to predict on the training data. However, if our model becomes too complex, we start focusing too much on each individual data point in our training set, and the model will not generalize well to new data.
There is a sweet spot in between that will yield the best generalization performance. This is the model we want to find.
# Relation of Model Complexity to Dataset Size
It’s important to note that model complexity is intimately tied to the variation of inputs contained in your training dataset: the larger variety of data points your dataset contains, the more complex a model you can use without overfitting. Usually, collecting more data points will yield more variety, so larger datasets allow building more complex models. However, simply duplicating the same data points or collecting very similar data will not help.
Having more data and building appropriately more complex models can often work wonders for supervised learning tasks. In the real world, you often have the ability to decide how much data to collect, which might be more beneficial than tweaking and tuning your model. Never underestimate the power of more data.
# Linear Models
Linear models are a class of models that are widely used in practice and have been studied extensively in the last few decades, with roots going back over a hundred years. Linear models make a prediction using a linear function of the input features.
## Linear Models for Regression
For regression, the general prediction formula for a linear model looks as follows:
ŷ = w[0] * x[0] + w[1] * x[1] + ... + w[p] * x[p] + b
Here, x[0] to x[p] denotes the features (in this example, the number of features is p+1) of a single data point, w and b are parameters of the model that are learned, and ŷ is the prediction the model makes. For a dataset with a single feature, this is:
ŷ = w[0] * x[0] + b
which you might remember from high school mathematics as the equation for a line. Here, w[0] is the slope and b is the y-axis offset. For more features, w contains the slopes along each feature axis. Alternatively, you can think of the predicted response as being a weighted sum of the input features, with weights (which can be negative) given by the entries of w.
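To make the formula concrete, here is a tiny NumPy sketch of the prediction for one data point with three features. The weights, offset, and feature values are made-up numbers for illustration, not parameters learned from any data.
```
import numpy as np

w = np.array([0.5, -1.2, 3.0])   # one learned weight (slope) per feature
b = 4.0                          # the offset / intercept
x = np.array([2.0, 1.0, 0.5])    # a single data point with three feature values

# The prediction is the weighted sum of the features plus the offset
y_hat = np.dot(w, x) + b         # 0.5*2.0 + (-1.2)*1.0 + 3.0*0.5 + 4.0 = 5.3
print(y_hat)
```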
Linear models for regression can be characterized as regression models for which the prediction is a line for a single feature, a plane when using two features, or a hyperplane in higher dimensions (that is, when using more features).
For datasets with many features, linear models can be very powerful. In particular, if you have more features than training data points, any target y can be perfectly modeled (on the training set) as a linear function.
There are many different linear models for regression. The difference between these models lies in how the model parameters w and b are learned from the training data, and how model complexity can be controlled.
# Linear Regression (aka Ordinary Least Squares)
Linear regression, or *ordinary least squares* (OLS), is the simplest and most classic linear method for regression. Linear regression finds the parameters w and b that minimize the *mean squared error* between predictions and the true regression targets, y, on the training set. The mean squared error is the average of the squared differences between the predictions and the true values. Linear regression has no hyperparameters to tune, which is a benefit, but it also has no way to control model complexity.
The scikit-learn documentation on [Linear Regression](http://scikit-learn.org/stable/modules/linear_model.html#ordinary-least-squares) has a decent basic example of its use.
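As a quick illustration of what minimizing the mean squared error looks like in code, here is a minimal sketch that fits scikit-learn's `LinearRegression` on synthetic one-feature data (not the Boston data we load below) and recovers the slope and intercept used to generate it.
```
import numpy as np
from sklearn.linear_model import LinearRegression

# Synthetic data generated from y = 3*x - 2 plus a little noise
rng = np.random.RandomState(0)
X = rng.rand(50, 1) * 10                              # 50 samples, 1 feature
y = 3 * X[:, 0] - 2 + rng.normal(scale=0.5, size=50)  # targets

lr = LinearRegression().fit(X, y)
print("w[0] (slope):  {:.2f}".format(lr.coef_[0]))     # close to 3
print("b (intercept): {:.2f}".format(lr.intercept_))   # close to -2
```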
## Advantages of Linear Regression (general, not specific to OLS)
* Simple to understand and to interpret, at least for a small number of features/dimensions
* Easy to visualize for 2 or 3 features
* Very fast to train and also fast to predict
* Doesn't suffer from the *curse of dimensionality* that methods such as KNearestNeighbors do
* Actually linear methods tend to work better with lots of features than with a small number of features
## Big Disadvantage specific to OLS, but not applicable to linear regression in general
* OLS has no way to control model complexity and can suffer from overfitting, particularly if there are a large number of features
* Modified versions of Linear Regression such as *Ridge Regression* and *Lasso* can mitigate or fix this issue
## Disadvantages of Linear Regression in general, not specific to OLS
* In lower-dimensional spaces, other models might yield better generalization performance
* Requires more data preparation than some other techniques (see the sketch after this list)
* Feature normalization is required for best results (for any algorithm which includes regularization)
* Non-ordinal categorical features need to be one-hot encoded
* Ordinal features need to be numerically encoded
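A minimal sketch of the last two preparation steps, using a made-up toy DataFrame (the column names and categories below are purely illustrative and unrelated to the Boston data):
```
import pandas as pd

df = pd.DataFrame({
    "rooms": [4, 6, 3],                            # numeric feature, used as-is
    "condition": ["poor", "good", "excellent"],    # ordinal: has a natural order
    "neighborhood": ["A", "B", "A"],               # non-ordinal: no natural order
})

# Ordinal feature: map the ordered categories to integers
df["condition"] = df["condition"].map({"poor": 0, "good": 1, "excellent": 2})

# Non-ordinal feature: one-hot encode (one 0/1 column per category)
df = pd.get_dummies(df, columns=["neighborhood"])
print(df)

# Feature scaling (e.g. sklearn.preprocessing.StandardScaler) would be applied after these steps
```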
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
```
### A First Application: Predicting Boston Housing Prices
One of the most famous datasets for regression in a supervised learning setting is the [Boston Housing data set](https://archive.ics.uci.edu/ml/datasets/Housing). It is a multivariate dataset introduced in a 1978 paper which records 13 attributes concerning housing values in the suburbs of Boston. NOTE: The data is very, very old and the house prices are ridiculously low by today's standards.
scikit-learn has a number of small toy datasets included with it which makes it quick and easy to experiment with different machine learning algorithms on these datasets.
The [sklearn.datasets.load_boston()](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html#sklearn.datasets.load_boston) method can be used to load the this dataset.
#### Meet the data
The *boston* object that is returned by **load_boston** is a **Bunch** object, which is very similar to a dictionary. It contains keys and values.
Feature Information:
1. CRIM: per capita crime rate by town
2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
3. INDUS: proportion of non-retail business acres per town
4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
5. NOX: nitric oxides concentration (parts per 10 million)
6. RM: average number of rooms per dwelling
7. AGE: proportion of owner-occupied units built prior to 1940
8. DIS: weighted distances to five Boston employment centres
9. RAD: index of accessibility to radial highways
10. TAX: full-value property-tax rate per $10,000
11. PTRATIO: pupil-teacher ratio by town
12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
13. LSTAT: % lower status of the population
Target Information
14. MEDV: Median value of owner-occupied homes in $1000's
```
from sklearn.datasets import load_boston
boston = load_boston()
print("Keys of boston: {}".format(boston.keys()))
# The value of the key DESCR is a short description of the dataset. Here we show the beginning of the description.
print(boston['DESCR'][:193] + "\n...")
# The value of feature_names is a list of strings, giving the abbreviated name of each feature
print("Feature names: {}".format(boston['feature_names']))
# The data itself is contained in the target and data fields.
# data contains the numeric measurements of features in a NumPy array
print("Type of data: {}".format(type(boston['data'])))
# The rows in the data array correspond to neighborhoods, while the columns represent the features
print("Shape of data: {}".format(boston['data'].shape))
# We see that the array contains measurements for 506 different neighborhoods. Here are values for the first 5.
print("First five columns of data:\n{}".format(boston['data'][:5]))
# The target array contains the Median value of owner-occupied homes in $1000's, also as a NumPy array
print("Type of target: {}".format(type(boston['target'])))
# target is a one-dimensional array, with one entry per sample
print("Shape of target: {}".format(boston['target'].shape))
# The target values are positive floating point numbers which represent a median house value in thousands of dollars.
print("Target:\n{}".format(boston['target']))
```
#### Measuring Success: Training and testing data
We want to build a machine learning model from this data that can predict the median house value of a neighborhood from its other measurements. But before we can apply our model to new measurements, we need to know whether it actually works -- that is, whether we should trust its predictions.
Unfortunately, we cannot use the data we used to build the model to evaluate it. This is because our model can always simply remember the whole training set, and will therefore always predict the correct label for any point in the training set. This "remembering" does not indicate to us whether the model will *generalize* well (in other words, whether it will also perform well on new data).
To assess the model's performance, we show it new data (data that it hasn't seen before) for which we have labels. This is usually done by splitting the labeled data we have collected (here, our 506 neighborhood records) into two parts. One part of the data is used to build our machine learning model, and is called the *training data* or *training set*. The rest of the data will be used to assess how well the model works; this is called the *test data*, *test set*, or *hold-out set*.
scikit-learn contains a function that shuffles the dataset and splits it for you: the [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function. This function extracts 75% of the rows in the data as the training set, together with the corresponding labels for this data. The remaining 25% of the data, together with the remaining labels, is declared as the test set. Deciding how much data you want to put into the training and the test set respectively is somewhat arbitrary, but scikit-learn's default 75/25 split is a reasonable starting point.
In scikit-learn, data is usually denoted with a capital X, while labels are denoted by a lowercase y. This is inspired by the standard formulation *f(x)=y* in mathematics, where *x* is the input to a function and *y* is the output. Following more conventions from mathematics, we use a capital *X* because the data is a two-dimensional array (a matrix) and a lowercase *y* because the target is a one-dimensional array (a vector).
Before making the split, the **train_test_split** function shuffles the dataset using a pseudorandom number generator. If we simply took the last 25% of the rows as a test set, the split would be biased by however the rows happen to be ordered in the file, so shuffling first gives a more representative test set.
To make sure this example code will always get the same output if run multiple times, we provide the pseudorandom number generator with a fixed seed using the **random_state** parameter.
The output of the **train_test_split** function is **X_train**, **X_test**, **y_train**, and **y_test**, which are all NumPy arrays. **X_train** contains 75% of the rows of the dataset, and **X_test** contains the remaining 25%.
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(boston['data'], boston['target'], random_state=0)
print("X_train shape: {}".format(X_train.shape))
print("y_train shape: {}".format(y_train.shape))
print("X_test shape: {}".format(X_test.shape))
print("y_test shape: {}".format(y_test.shape))
```
#### First things first: Look at your data
Before building a machine learning model, it is often a good idea to inspect the data, to see if the task is easily solvable without machine learning, or if the desired information might not be contained in the data.
Additionally, inspecting the data is a good way to find abnormalities and peculiarities. Maybe some of your measurements were recorded in different units, for example. In the real world, inconsistencies in the data and unexpected measurements are very common, as are missing data and not-a-number (NaN) or infinite values.
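As a quick sanity check along these lines, we might look for NaN or infinite values directly (a minimal sketch; the Boston data shipped with scikit-learn should contain neither):
```
# Check the feature matrix for missing or infinite entries
import numpy as np

print("Any NaN values? {}".format(np.isnan(boston['data']).any()))
print("Any infinite values? {}".format(np.isinf(boston['data']).any()))
```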
One of the best ways to inspect data is to visualize it. One way to do this is by using a *scatter plot*. A scatter plot of the data puts one feature along the x-axis and another along the y-axis, and draws a dot for each data point. Unfortunately, computer screens have only two dimensions, which allows us to plot only two (or maybe three) features at a time. It is difficult to plot datasets with more than three features this way. One way around this problem is to do a *pair plot*, which looks at all possible pairs of features. With the 13 features we have here, the resulting plot is large but still manageable. You should keep in mind, however, that a pair plot does not show the interaction of all of the features at once, so some interesting aspects of the data may not be revealed when visualizing it this way.
In Python, the *pandas* library has a convenient function called [scatter_matrix](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html#scatter-matrix-plot) for creating pair plots for a DataFrame.
```
import pandas as pd

# create dataframe from data in X_train
boston_df = pd.DataFrame(X_train, columns=boston.feature_names)
# Add in the target data
boston_df['MEDV'] = y_train
# Look at the first few rows
boston_df.head()
# create a scatter matrix from the dataframe
# (in older pandas versions this function was available as pd.scatter_matrix)
tmp = pd.plotting.scatter_matrix(boston_df, figsize=(15, 15))
```
From the plots, we can see RM has a strong positive linear relationship with MEDV and LSTAT has a strong negative one. This makes sense - the housing price should go up as the number of rooms increases and the housing prices should go down as the percentage of lower class/income families in the neighborhood increases.
```
# Get a high-level overview of the data
boston_df.describe()
# Find which features are most highly correlated with the housing prices
# (MEDV was already added to boston_df above)
boston_df.corr()['MEDV']
```
#### Building your model: Linear Regression
Now we can start building the actual machine learning model. There are many regression algorithms in *scikit-learn* that we could use. Here we will use Ordinary Least Squares (OLS) Linear Regression because it is easy to understand and interpret.
All machine learning models in *scikit-learn* are implemented in their own classes, which are called *Estimator* classes. The Linear Regression algorithm is implemented in the [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) class in the **linear_model** module. Before we can use the model, we need to instantiate the class into an object. This is when we will set any parameters of the model. The LinearRegression model doesn't have any particular parameters of importance.
```
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
```
The *lr* object encapsulates the algorithm that will be used to build the model from the training data, as well the algorithm to make predictions on new data points. It will also hold the information that the algorithm has extracted from the training data.
To build the model on the training set, we call the **fit** method of the *lr* object, which takes as arguments the NumPy array *X_train* containing the training data and the NumPy array *y_train* of the corresponding training labels.
```
lr.fit(X_train, y_train)
```
The “slope” parameters (w), also called weights or coefficients, are stored in the coef_ attribute, while the offset or intercept (b) is stored in the intercept_ attribute:
```
print("lr.coef_: {}".format(lr.coef_))
print("lr.intercept_: {}".format(lr.intercept_))
```
The intercept_ attribute is always a single float number, while the coef_ attribute is a NumPy array with one entry per input feature. As we only have 13 input features in this dataset, lr.coef_ has 13 entries.
Let’s look at the training set and test set performance:
```
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
```
An R^2 of around 0.64 on the test set is not very good, but we can see that the scores on the training and test sets are a decent distance apart. This means we are likely overfitting. With higher-dimensional datasets (meaning datasets with a large number of features), linear models become more powerful, and there is a higher chance of overfitting. More complicated linear models such as *Ridge Regression* and *Lasso* have been designed to help control this overfitting problem.
An R^2 of around 0.77 on the training set is OK, but not great. For a really good fit, we would want an R^2 of around 0.95 or so. This tells us we are missing something. One possibility is to do some feature engineering and include polynomial powers of some of the features and/or products of some of the features.
Also, linear models tend to work better when all of the features exist on roughly the same scale, so we could attempt to scale our data as well.
# Preprocessing and Scaling
Some algorithms, like neural networks, SVMs, and k-NearestNeighbors are very sensitive to the scaling of the data; while many others such as linear models with regularization (Ridge, Lasso, etc.) are moderately sensitive to the scaling of the data. Therefore, a common practice is to adjust the features so that the data representation is more suitable for these algorithms. Often, this is a simple per-feature rescaling and shift of the data.
## Different Kinds of Preprocessing
Different algorithms benefit from different kinds of scaling, and thus *scikit-learn* supports a variety of scaling methods, though they all have a similar API.
### StandardScaler
Neural networks expect all input features to vary in a similar way, and ideally to have a mean of 0, and a variance of 1. When using an ANN, we must rescale our data so that it fulfills these requirements. For doing this automatically, *scikit-learn* has the [StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler). The **StandardScaler** in *scikit-learn* ensures that for each feature the mean is 0 and the variance is 1, bringing all features to the same magnitude. However, this scaling does not ensure any particular minimum and maximum values for the features.
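As a minimal sketch of how **StandardScaler** might be used on the Boston features loaded above (a quick illustration, not part of the pipeline we build later):
```
from sklearn.preprocessing import StandardScaler

# Fit the scaler to the data and transform it in one step
X_scaled = StandardScaler().fit_transform(boston['data'])
# Each column should now have (approximately) mean 0 and variance 1
print("Per-feature means:\n{}".format(X_scaled.mean(axis=0).round(2)))
print("Per-feature variances:\n{}".format(X_scaled.var(axis=0).round(2)))
```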
### MinMaxScaler
A common rescaling method for kernel SVMs is to scale the data such that all features are between 0 and 1. We can do this in *scikit-learn* by using the [MinMaxScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler) preprocessing method. The **MinMaxScaler** shifts the data such that all features are exactly between 0 and 1. For a two-dimensional dataset this means all of the data is contained within the rectangle created by the x-axis between 0 and 1 and the y-axis between 0 and 1.
### RobustScaler
Standard scaling does not ensure any particular minimum and maximum values for the features. The [RobustScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html#sklearn.preprocessing.RobustScaler) works similarly to the **StandardScaler** in that it ensures statistical properties for each feature that guarantee that they are on the same scale. However, the **RobustScaler** uses the median and quartiles, instead of mean and variance. This makes the **RobustScaler** ignore data points that are very different from the rest (like measurement errors). These odd data points are also called *outliers*, and can lead to trouble for other scaling techniques.
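A similar minimal sketch with **RobustScaler**, again on the Boston features from above:
```
import numpy as np
from sklearn.preprocessing import RobustScaler

# Center each feature on its median and scale by its interquartile range
X_robust = RobustScaler().fit_transform(boston['data'])
# After robust scaling, each feature's median should be 0
print("Per-feature medians:\n{}".format(np.median(X_robust, axis=0).round(2)))
```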
```
# Scale the boston dataset
from sklearn.preprocessing import MinMaxScaler
X = MinMaxScaler().fit_transform(boston.data)
X_train, X_test, y_train, y_test = train_test_split(X, boston['target'], random_state=0)
lr = LinearRegression().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
```
Ordinary Least Squares (OLS) regression is not sensitive to feature scaling, but all of the regularized linear methods which help reduce the overfitting present in OLS are sensitive to feature scaling.
# Feature Engineering
Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work. Feature engineering is fundamental to the application of machine learning, and is both difficult and expensive. The need for manual feature engineering can be obviated by automated feature learning.
In particular, linear models might benefit greatly from generating new features via techniques such as binning, and adding polynomials and interactions. However, more complex models like random forests and SVMs might be able to learn more complex tasks without explicitly expanding the feature space.
In practice, the features that are used (and the match between features and method) is often the most important piece in making a machine learning approach work well.
## Interactions and Polynomials
One way to enrich a feature representation, particularly for linear models, is adding *interaction features* - products of individual original features. Another way to enrich a feature representation is to use *polynomials* of the original features - for a given feature x, we might want to consider x^2, x^3, x^4, and so on. This kind of feature engineering is often used in statistical modeling, but it’s also common in many practical machine learning applications.
Within *scikit-learn*, the addition of both *interaction features* and *polynomial features* is implemented in [PolynomialFeatures](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html#sklearn.preprocessing.PolynomialFeatures) in the **preprocessing** module.
In the code below, we modify the Boston housing dataset by adding all polynomial features and interactions up to a degree of 2. The data originally had 13 features, which were expanded into 105 features. These new features represent all possible interactions between two different original features, the square of each original feature, the original features themselves, and a constant term. degree=2 here means that we look at all features that are the product of up to two original features. The exact correspondence between input and output features can be found using the **get_feature_names** method.
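As a small illustration of that correspondence (fitting **PolynomialFeatures** on the raw Boston features just to inspect the generated names; this is a sketch separate from the pipeline below):
```
from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=2).fit(boston['data'])
feature_names = poly.get_feature_names(boston['feature_names'])
print("Number of generated features: {}".format(len(feature_names)))
# The first few names: the constant term, the original features, then squares and interactions
print(feature_names[:20])
```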
```
from sklearn.datasets import load_boston
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures, StandardScaler, RobustScaler
def load_extended_boston(scaler='minmax'):
    boston = load_boston()
    X = boston.data
    if 'standard' == scaler:
        X = StandardScaler().fit_transform(boston.data)
    elif 'robust' == scaler:
        X = RobustScaler().fit_transform(boston.data)
    else:
        X = MinMaxScaler().fit_transform(boston.data)
    X = PolynomialFeatures(degree=2).fit_transform(X)
    return X, boston.target
X, y = load_extended_boston()
X.shape
# What if we fit this new dataset with a vastly expanded set of features using OLS?
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
lr = LinearRegression().fit(X_train, y_train)
print("Training set score: {:.2f}".format(lr.score(X_train, y_train)))
print("Test set score: {:.2f}".format(lr.score(X_test, y_test)))
```
Now the basic OLS model is doing a dramatically better job fitting the training set (R^2 of 0.95 vs 0.77).
This discrepancy between performance on the training set and the test set is a clear sign of overfitting, and therefore we should try to find a model that allows us to control complexity. One of the most commonly used alternatives to standard linear regression is *ridge regression*, which we will look into next.
# Ridge Regression
Ridge regression is also a linear model for regression, so the formula it uses to make predictions is the same one used for ordinary least squares. In ridge regression, though, the coefficients (w) are chosen not only so that they predict well on the training data, but also to fit an additional constraint. We also want the magnitude of coefficients to be as small as possible; in other words, all entries of w should be close to zero. Intuitively, this means each feature should have as little effect on the outcome as possible (which translates to having a small slope), while still predicting well. This constraint is an example of what is called *regularization*. Regularization means explicitly restricting a model to avoid overfitting. The particular kind used by ridge regression is known as L2 regularization.
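Written out, ridge regression minimizes the usual sum of squared errors plus an L2 penalty on the weights, with a parameter $\alpha \ge 0$ controlling the strength of the penalty:

$$\min_{w,\,b} \; \sum_{i=1}^{n} \left( y_i - w^\top x_i - b \right)^2 + \alpha \lVert w \rVert_2^2$$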
Ridge regression is implemented in [linear_model.Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge). Let’s see how well it does on the extended Boston Housing dataset:
```
from sklearn.linear_model import Ridge
ridge = Ridge().fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge.score(X_test, y_test)))
```
As you can see, the training set score of Ridge is *lower* than for LinearRegression, while the test set score is *higher*. This is consistent with our expectation. With linear regression, we were overfitting our data. Ridge is a more restricted model, so we are less likely to overfit. A less complex model means worse performance on the training set, but better generalization. As we are only interested in generalization performance, we should choose the Ridge model over the LinearRegression model.
The Ridge model makes a trade-off between the simplicity of the model (near-zero coefficients) and its performance on the training set. How much importance the model places on simplicity versus training set performance can be specified by the user, using the **alpha** parameter. In the previous example, we used the default parameter alpha=1.0. There is no reason why this will give us the best trade-off, though. The optimum setting of alpha depends on the particular dataset we are using. Increasing alpha forces coefficients to move more toward zero, which decreases training set performance but might help generalization. For example:
```
ridge10 = Ridge(alpha=10).fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge10.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge10.score(X_test, y_test)))
```
Decreasing alpha allows the coefficients to be less restricted. For very small values of alpha, coefficients are barely restricted at all, and we end up with a model that resembles LinearRegression:
```
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)
print("Training set score: {:.2f}".format(ridge01.score(X_train, y_train)))
print("Test set score: {:.2f}".format(ridge01.score(X_test, y_test)))
```
Here, alpha=0.1 seems to be working well. We could try decreasing alpha even more to improve generalization. For now, notice how the parameter alpha corresponds to the model complexity.
Very shortly we will need to think about systematic methods for properly selecting optimal values for parameters such as **alpha**.
We can also get a more qualitative insight into how the alpha parameter changes the model by inspecting the coef_ attribute of models with different values of alpha. A higher alpha means a more restricted model, so we expect the entries of coef_ to have smaller magnitude for a high value of alpha than for a low value of alpha. This is confirmed in the plot below:
```
import matplotlib.pyplot as plt

plt.figure(figsize=(15, 10))
plt.plot(ridge.coef_, 's', label="Ridge alpha=1")
plt.plot(ridge10.coef_, '^', label="Ridge alpha=10")
plt.plot(ridge01.coef_, 'v', label="Ridge alpha=0.1")
plt.plot(lr.coef_, 'o', label="LinearRegression")
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.hlines(0, 0, len(lr.coef_))
plt.ylim(-25, 25)
plt.legend()
plt.show()
```
Clearly, the interactions and polynomial features gave us a good boost in performance when using Ridge. When using a more complex model like a random forest, the story can be a bit different, though. Adding features will benefit linear models the most. For very complex models, adding features may actually slightly decrease the performance.
Machine learning is complex. Often you have to try several experiments and just see what works best.
# Model Evaluation and Improvement
To evaluate our supervised models, so far we have split our dataset into a training set and a test set using the **train_test_split function**, built a model on the training set by calling the fit method, and evaluated it on the test set using the score method, which for classification computes the fraction of correctly classified samples and for regression computes the R^2.
Remember, the reason we split our data into training and test sets is that we are interested in measuring how well our model *generalizes* to new, previously unseen data. We are not interested in how well our model fit the training set, but rather in how well it can make predictions for data that was not observed during training.
As we saw when exploring Ridge regression, we need a more robust way to assess generalization performance which is capable of automatically choosing optimal values for hyper-parameters such as **alpha**.
## Cross-Validation
*Cross-validation* is a statistical method of evaluating generalization performance that is more stable and thorough than using a split into a training and a test set. In cross-validation, the data is instead split repeatedly and multiple models are trained. The most commonly used version of cross-validation is *k-fold cross-validation*, where *k* is a user-specified number, usually 5 or 10. When performing five-fold cross-validation, the data is first partitioned into five parts of (approximately) equal size, called *folds*. Next, a sequence of models is trained. The first model is trained using the first fold as the test set, and the remaining folds (2–5) are used as the training set. The model is built using the data in folds 2–5, and then the accuracy is evaluated on fold 1. Then another model is built, this time using fold 2 as the test set and the data in folds 1, 3, 4, and 5 as the training set. This process is repeated using folds 3, 4, and 5 as test sets. For each of these five splits of the data into training and test sets, we compute the accuracy. In the end, we have collected five accuracy values.
Usually, the first fifth of the data is the first fold, the second fifth of the data is the second fold, and so on.
The whole point of cross-validation is to be more robust than a simple train/test split so that the results are not likely to be influenced by a particularly good or bad split of the data. The main disadvantage is that it requires more computation.
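As a minimal sketch of how those folds are formed (using **KFold** directly on the Boston data just to inspect the split sizes; the high-level helper shown next does all of this for us):
```
from sklearn.model_selection import KFold

kfold = KFold(n_splits=5)
for fold_index, (train_indices, test_indices) in enumerate(kfold.split(boston['data'])):
    print("Fold {}: {} training samples, {} test samples".format(
        fold_index, len(train_indices), len(test_indices)))
```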
### Cross-Validation in scikit-learn
Cross-validation is implemented in scikit-learn using the [cross_val_score](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html#sklearn.model_selection.cross_val_score) function from the *model_selection* module. The parameters of the **cross_val_score** function are the model we want to evaluate, the training data, and the ground-truth labels.
```
# Let's evaluate cross-validation on the iris dataset using logistic regression (which is actually classification)
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
iris = load_iris()
logreg = LogisticRegression()
scores = cross_val_score(logreg, iris.data, iris.target)
print("Cross-validation scores: {}".format(scores))
```
By default, cross_val_score performs three-fold cross-validation, returning three accuracy values. We can change the number of folds used by changing the cv parameter:
```
scores = cross_val_score(logreg, iris.data, iris.target, cv=5)
print("Cross-validation scores: {}".format(scores))
```
A common way to summarize the cross-validation accuracy is to compute the mean:
```
print("Average cross-validation score: {:.2f}".format(scores.mean()))
```
Using the mean cross-validation score, we can conclude that we expect the model to be around 96% accurate on average. Looking at all five scores produced by the five-fold cross-validation, we can also conclude that there is a relatively high variance in the accuracy between folds, ranging from 100% accuracy to 90% accuracy. This could imply that the model is very dependent on the particular folds used for training, but it could also just be a consequence of the small size of the dataset.
### Benefits of Cross-Validation
There are several benefits to using cross-validation instead of a single split into a training and a test set. First, remember that train_test_split performs a random split of the data. Imagine that we are “lucky” when randomly splitting the data, and all examples that are hard to classify end up in the training set. In that case, the test set will only contain “easy” examples, and our test set accuracy will be unrealistically high. Conversely, if we are “unlucky,” we might have randomly put all the hard-to-classify examples in the test set and consequently obtain an unrealistically low score. However, when using cross-validation, each example will be in the training set exactly once: each example is in one of the folds, and each fold is the test set once. Therefore, the model needs to generalize well to all of the samples in the dataset for all of the cross-validation scores (and their mean) to be high.
Having multiple splits of the data also provides some information about how sensitive our model is to the selection of the training dataset. For the iris dataset, we saw accuracies between 90% and 100%. This is quite a range, and it provides us with an idea about how the model might perform in the worst case and best case scenarios when applied to new data.
Another benefit of cross-validation as compared to using a single split of the data is that we use our data more effectively. When using train_test_split, we usually use 75% of the data for training and 25% of the data for evaluation. When using five-fold cross-validation, in each iteration we can use four-fifths of the data (80%) to fit the model. When using 10-fold cross-validation, we can use nine-tenths of the data (90%) to fit the model. More data will usually result in more accurate models.
The main disadvantage of cross-validation is increased computational cost. As we are now training k models instead of a single model, cross-validation will be roughly k times slower than doing a single split of the data.
It is important to keep in mind that cross-validation is not a way to build a model that can be applied to new data. Cross-validation does not return a model. When calling cross_val_score, multiple models are built internally, but the purpose of cross-validation is only to evaluate how well a given algorithm will generalize when trained on a specific dataset.
# Stratified k-Fold Cross-Validation and Other Strategies
Splitting the dataset into k folds by starting with the first one-k-th part of the data, as described in the previous section, might not always be a good idea. For example, let’s have a look at the boston housing dataset:
```
lr = LinearRegression()
scores = cross_val_score(lr, boston.data, boston.target)
print("Cross-validation scores: {}".format(scores))
```
As we can see, the default 3-fold cross-validation performed reasonably for the first two folds, but very poorly on the third.
The fundamental problem here is that if the data isn't ordered randomly, then just taking folds in order doesn't give a random sample for each fold. There are multiple possible ways to mitigate this issue.
### Stratified k-Fold Cross-Validation
As the simple k-fold strategy would obviously fail for classification problems if the data is organized by target category, *scikit-learn* does not use it for classification, but rather uses *stratified k-fold cross-validation*. In stratified cross-validation, we split the data such that the proportions between classes are the same in each fold as they are in the whole dataset.
*scikit-learn* supports stratified k-fold cross-validation via the [StratifiedKFold](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold) class in the *model_selection* module.
For example, if 90% of your samples belong to class A and 10% of your samples belong to class B, then stratified cross-validation ensures that in each fold, 90% of samples belong to class A and 10% of samples belong to class B.
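A minimal sketch of passing a **StratifiedKFold** splitter to **cross_val_score** explicitly (using the iris classification data from earlier; for classifiers this is what scikit-learn already does by default):
```
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, cross_val_score

iris = load_iris()
skfold = StratifiedKFold(n_splits=5)
scores = cross_val_score(LogisticRegression(), iris.data, iris.target, cv=skfold)
print("Stratified cross-validation scores: {}".format(scores))
```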
For regression, *scikit-learn* uses the standard k-fold cross-validation by default.
### Shuffle-split cross-validation
Another, very flexible strategy for cross-validation is *shuffle-split cross-validation*. In shuffle-split cross-validation, each split samples **train_size** many points for the training set and **test_size** many (disjoint) points for the test set. This splitting is repeated **n_splits** times. You can use integers for **train_size** and **test_size** to use absolute sizes for these sets, or floating-point numbers to use fractions of the whole dataset.
Since the sampling in *shuffle-split cross-validation* is done in a random fashion, this is a safer alternative to default *k-Fold Cross-Validation* when the data isn't truly randomized.
*scikit-learn* supports shuffle-split cross-validation via the [ShuffleSplit](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html#sklearn.model_selection.ShuffleSplit) class in the *model_selection* module.
There is also a stratified variant of ShuffleSplit, aptly named [StratifiedShuffleSplit](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html#sklearn.model_selection.StratifiedShuffleSplit), which can provide more reliable results for classification tasks.
```
# Let's look at the boston housing dataset again using shuffle-split cross-validation to ensure random sampling
# The following code splits the dataset into 80% training set and 20% test set for 3 iterations:
from sklearn.model_selection import ShuffleSplit
shuffle_split = ShuffleSplit(train_size=.8, test_size=.2, n_splits=3)
scores = cross_val_score(lr, boston.data, boston.target, cv=shuffle_split)
print("Cross-validation scores:\n{}".format(scores))
```
## Grid Search
Now that we know how to evaluate how well a model generalizes, we can take the next step and improve the model’s generalization performance by tuning its parameters. We discussed the parameter settings of the Ridge model for ridge regression earlier. Finding the values of the important parameters of a model (the ones that provide the best generalization performance) is a tricky task, but necessary for almost all models and datasets. Because it is such a common task, there are standard methods in *scikit-learn* to help you with it. The most commonly used method is grid search, which basically means trying all possible combinations of the parameters of interest.
Consider the case of ridge regression, as implemented in the Ridge class. As we discussed earlier, there is one important parameter: the regularization parameter, *alpha*. Say we want to try the values 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, and 100 for *alpha*. Because we have eleven different settings for *alpha* and *alpha* is the only parameter, we have 11 combinations of parameters in total. Looking at all possible combinations creates a table (or grid) of parameter settings for the Ridge regression model.
### Simple Grid Search
We can implement a simple grid search just as a for loop over the parameter, training and evaluating a classifier for each value:
```
X, y = load_extended_boston(scaler='standard')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print("Size of training set: {} size of test set: {}".format(X_train.shape[0], X_test.shape[0]))
best_score = 0
for alpha in [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]:
    # for each candidate alpha, train a Ridge model
    ridge = Ridge(alpha=alpha)
    ridge.fit(X_train, y_train)
    # evaluate the Ridge model on the test set
    score = ridge.score(X_test, y_test)
    # if we got a better score, store the score and parameters
    if score > best_score:
        best_score = score
        best_parameters = {'alpha': alpha}
print("Best score: {:.2f}".format(best_score))
print("Best parameters: {}".format(best_parameters))
```
### The Danger of Overfitting the Parameters and the Validation Set
Given this result, we might be tempted to report that we found a model that achieves an R^2 of 0.78 on our dataset. However, this claim could be overly optimistic (or just wrong), for the following reason: we tried many different parameters and selected the one with the best score on the test set, but this score won't necessarily carry over to new data. Because we used the test data to adjust the parameters, we can no longer use it to assess how good the model is. This is the same reason we needed to split the data into training and test sets in the first place; we need an independent dataset to evaluate, one that was not used to create the model.
One way to resolve this problem is to split the data again, so we have three sets: the training set to build the model, the validation (or development) set to select the parameters of the model, and the test set to evaluate the performance of the selected parameters.
After selecting the best parameters using the validation set, we can rebuild a model using the parameter settings we found, but now training on both the training data and the validation data. This way, we can use as much data as possible to build our model. This leads to the following implementation:
```
X, y = load_extended_boston(scaler='standard')
# split data into train+validation set and test set
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, random_state=0)
# split train+validation set into training and validation sets
X_train, X_valid, y_train, y_valid = train_test_split(X_trainval, y_trainval, random_state=1)
print("Size of training set: {} size of validation set: {} size of test set:"
" {}\n".format(X_train.shape[0], X_valid.shape[0], X_test.shape[0]))
best_score = 0
for alpha in [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]:
    # for each candidate alpha, train a Ridge model
    ridge = Ridge(alpha=alpha)
    ridge.fit(X_train, y_train)
    # evaluate the Ridge model on the validation set
    score = ridge.score(X_valid, y_valid)
    # if we got a better score, store the score and parameters
    if score > best_score:
        best_score = score
        best_parameters = {'alpha': alpha}
# rebuild a model on the combined training and validation set,
# and evaluate it on the test set
ridge = Ridge(**best_parameters)
ridge.fit(X_trainval, y_trainval)
test_score = ridge.score(X_test, y_test)
print("Best score on validation set: {:.2f}".format(best_score))
print("Best parameters: ", best_parameters)
print("Test set score with best parameters: {:.2f}".format(test_score))
```
The best score on the validation set is 0.92. However, the score on the test set—the score that actually tells us how well we generalize—is lower, at 0.78. So the most we can claim is an R^2 of about 0.78 on new data. This happens to be the same as before, but now we can make a stronger claim, since the final test set wasn't used in any way, shape, or form during hyper-parameter tuning.
The distinction between the training set, validation set, and test set is fundamentally important to applying machine learning methods in practice. Any choices made based on the test set accuracy “leak” information from the test set into the model. Therefore, it is important to keep a separate test set, which is only used for the final evaluation. It is good practice to do all exploratory analysis and model selection using the combination of a training and a validation set, and reserve the test set for a final evaluation—this is even true for exploratory visualization. Strictly speaking, evaluating more than one model on the test set and choosing the better of the two will result in an overly optimistic estimate of how accurate the model is.
### Grid Search with Cross-Validation
While the method of splitting the data into a training, a validation, and a test set that we just saw is workable, and relatively commonly used, it is quite sensitive to how exactly the data is split. From the output of the previous code snippet we can see that the grid search selects 'alpha': 50 as the best parameter. But if we were to take a different part of the training data as the validation set, it may optimize for a different value. For a better estimate of the generalization performance, instead of using a single split into a training and a validation set, we can use cross-validation to evaluate the performance of each parameter combination. This method can be coded up as follows:
```
import numpy as np

best_score = 0
for alpha in [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]:
    # for each candidate alpha, build a Ridge model
    ridge = Ridge(alpha=alpha)
    # perform cross-validation
    scores = cross_val_score(ridge, X_trainval, y_trainval, cv=5)
    # compute the mean cross-validation score
    score = np.mean(scores)
    # if we got a better score, store the score and parameters
    if score > best_score:
        best_score = score
        best_parameters = {'alpha': alpha}
# rebuild a model on the combined training and validation set,
# and evaluate it on the test set
ridge = Ridge(**best_parameters)
ridge.fit(X_trainval, y_trainval)
test_score = ridge.score(X_test, y_test)
print("Best score on validation set: {:.2f}".format(best_score))
print("Best parameters: ", best_parameters)
print("Test set score with best parameters: {:.2f}".format(test_score))
```
To evaluate the R^2 of the Ridge regression model using a particular setting of alpha with five-fold cross-validation, we need to train 11 * 5 = 55 models. As you can imagine, the main downside of the use of cross-validation is the time it takes to train all these models. However, as you can see here, it is a more reliable method which is less sensitive to how precisely the validation set is sampled from the overall training set, and thus more likely to generalize well.
### GridSearchCV
Because grid search with cross-validation is such a commonly used method to adjust parameters, *scikit-learn* provides the [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV) class, which implements it in the form of an estimator. To use the **GridSearchCV** class, you first need to specify the parameters you want to search over using a dictionary. GridSearchCV will then perform all the necessary model fits. The keys of the dictionary are the names of parameters we want to adjust (as given when constructing the model—in this case, alpha), and the values are the parameter settings we want to try out. Trying the values 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, and 100 for alpha translates to the following dictionary:
```
param_grid = {'alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]}
print("Parameter grid:\n{}".format(param_grid))
```
We can now instantiate the **GridSearchCV** class with the model (*Ridge*), the parameter grid to search (*param_grid*), and the cross-validation strategy we want to use (say, five-fold cross-validation):
```
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
grid_search = GridSearchCV(Ridge(), param_grid, cv=5)
```
**GridSearchCV** will use cross-validation in place of the split into a training and validation set that we used before. However, we still need to split the data into a training and a test set, to avoid overfitting the parameters:
```
X, y = load_extended_boston(scaler='standard')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
```
The *grid_search* object that we created behaves just like a classifier; we can call the standard methods **fit**, **predict**, and **score** on it. However, when we call **fit**, it will run cross-validation for each combination of parameters we specified in param_grid:
```
grid_search.fit(X_train, y_train)
```
Fitting the **GridSearchCV** object not only searches for the best parameters, but also automatically fits a new model on the whole training dataset with the parameters that yielded the best cross-validation performance. What happens in fit is therefore equivalent to the result of the code we saw at the beginning of this section. The **GridSearchCV** class provides a very convenient interface to access the retrained model using the predict and score methods. To evaluate how well the best found parameters generalize, we can call score on the test set:
```
print("Test set score: {:.2f}".format(grid_search.score(X_test, y_test)))
```
Choosing the parameters using cross-validation, we actually found a model that achieves an R^2 of 0.77 on the test set. The important thing here is that we *did not use the test set* to choose the parameters. The parameters that were found are stored in the **`best_params_`** attribute, and the best cross-validation score (the mean score over the different splits for this parameter setting) is stored in **`best_score_`**:
```
print("Best parameters: {}".format(grid_search.best_params_))
print("Best cross-validation score: {:.2f}".format(grid_search.best_score_))
```
Sometimes it is helpful to have access to the actual model that was found—for example, to look at coefficients or feature importances. You can access the model with the best parameters trained on the whole training set using the **`best_estimator_`** attribute:
```
print("Best estimator:\n{}".format(grid_search.best_estimator_))
```
Because *grid_search* itself has **predict** and **score** methods, using **`best_estimator_`** is not needed to make predictions or evaluate the model.
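For example, a small sketch using the objects fit above:
```
# grid_search behaves like a fitted estimator, so we can predict with it directly
print("Predictions for the first five test samples: {}".format(grid_search.predict(X_test[:5])))
print("Actual values: {}".format(y_test[:5]))
```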
### Putting it all together
The one thing we didn't do was experiment with different train/test splits. Let's rerun the whole procedure on a fresh random split a number of times and see how consistent the results are:
```
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
param_grid = {'alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]}
grid_search = GridSearchCV(Ridge(), param_grid, cv=5)
X, y = load_extended_boston(scaler='standard')
for i in range(10):
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    grid_search.fit(X_train, y_train)
    print("Run {} - Test set score: {:.2f} Best parameters: {}".format(i, grid_search.score(X_test, y_test),
                                                                       grid_search.best_params_))
```
# Parameter Management
Once we have chosen an architecture
and set our hyperparameters,
we proceed to the training loop,
where our goal is to find parameter values
that minimize our objective function.
After training, we will need these parameters
in order to make future predictions.
Additionally, we will sometimes wish
to extract the parameters
either to reuse them in some other context,
to save our model to disk so that
it may be executed in other software,
or for examination in the hopes of
gaining scientific understanding.
Most of the time, we will be able
to ignore the nitty-gritty details
of how parameters are declared
and manipulated, relying on DJL
to do the heavy lifting.
However, when we move away from
stacked architectures with standard layers,
we will sometimes need to get into the weeds
of declaring and manipulating parameters.
In this section, we cover the following:
* Accessing parameters for debugging, diagnostics, and visualizations.
* Parameter initialization.
* Sharing parameters across different model components.
We start by focusing on an MLP with one hidden layer.
```
%mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.7.0-SNAPSHOT
%maven ai.djl:model-zoo:0.7.0-SNAPSHOT
%maven org.slf4j:slf4j-api:1.7.26
%maven org.slf4j:slf4j-simple:1.7.26
%maven net.java.dev.jna:jna:5.3.0
%maven ai.djl.mxnet:mxnet-engine:0.7.0-SNAPSHOT
%maven ai.djl.mxnet:mxnet-native-auto:1.7.0-a
import ai.djl.*;
import ai.djl.ndarray.*;
import ai.djl.ndarray.types.*;
import ai.djl.ndarray.index.*;
import ai.djl.nn.*;
import ai.djl.nn.core.*;
import ai.djl.training.*;
import ai.djl.training.initializer.*;
import ai.djl.training.dataset.*;
import ai.djl.util.*;
import ai.djl.translate.*;
import ai.djl.inference.Predictor;
NDManager manager = NDManager.newBaseManager();
NDArray x = manager.randomUniform(0, 1, new Shape(2, 4));
Model model = Model.newInstance("lin-reg");
SequentialBlock net = new SequentialBlock();
net.add(Linear.builder().setUnits(8).build());
net.add(Activation.reluBlock());
net.add(Linear.builder().setUnits(1).build());
net.setInitializer(new NormalInitializer());
net.initialize(manager, DataType.FLOAT32, x.getShape());
model.setBlock(net);
Predictor<NDList, NDList> predictor = model.newPredictor(new NoopTranslator());
predictor.predict(new NDList(x)).singletonOrThrow(); // forward computation
```
## Parameter Access
Let us start with how to access parameters
from the models that you already know.
Each layer's parameters are conveniently stored in a `Pair<String, Parameter>` consisting of a unique
`String` that serves as a key for the layer and the `Parameter` itself.
The `ParameterList` is an extension of `PairList` and is returned with a call to the `getParameters()` method on a `Block`.
We can inspect the parameters of the `net` defined above.
When a model is defined via the `SequentialBlock` class,
we can access any layer's `Pair<String, Parameter>` by calling `get()` on the `ParameterList` and passing in the index
of the parameter we want. Calling `getKey()` and `getValue()` on a `Pair<String, Parameter>` will get the parameter's name and `Parameter` respectively. We can also directly get the `Parameter` we want from the `ParameterList`
by calling `get()` and passing in its unique key (the `String` portion of the `Pair<String, Parameter>`). If we call `valueAt()` and pass in
the index, we will get the `Parameter` directly as well.
```
ParameterList params = net.getParameters();
// Print out all the keys (unique!)
for (var pair : params) {
System.out.println(pair.getKey());
}
// Use the unique key to access the Parameter
NDArray dense0Weight = params.get("01Linear_weight").getArray();
NDArray dense0Bias = params.get("01Linear_bias").getArray();
// Use indexing to access the Parameter
NDArray dense1Weight = params.valueAt(2).getArray();
NDArray dense1Bias = params.valueAt(3).getArray();
System.out.println(dense0Weight);
System.out.println(dense0Bias);
System.out.println(dense1Weight);
System.out.println(dense1Bias);
```
The output tells us a few important things.
First, each fully-connected layer
has two parameters, e.g.,
`dense0Weight` and `dense0Bias`,
corresponding to that layer's
weights and biases, respectively.
The `params` variable is a `ParameterList` which contains the
key-value pairs of the layer name and a parameter of the
`Parameter` class.
With a `Parameter`, we can get the underlying numerical values as `NDArray`s by calling
`getArray()` on them!
Both the weights and biases are stored as single precision floats (`FLOAT32`).
### Targeted Parameters
Parameters are complex objects,
containing data, gradients,
and additional information.
That's why we need to request the data explicitly.
Note that the bias vector consists of zeroes
because we have not updated the network
since it was initialized.
Note that unlike the biases, the weights are nonzero.
This is because unlike biases,
weights are initialized randomly.
In addition to `getArray()`, each `Parameter`
also provides a `requireGradient()` method which
returns whether the parameter needs gradients to be computed
(which we set on the `NDArray` with `attachGradient()`).
The gradient has the same shape as the weight.
To actually access the gradient, we simply call `getGradient()` on the
`NDArray`.
Because we have not invoked backpropagation
for this network yet, its values are all 0.
We would invoke it by creating a `GradientCollector` instance and
run our calculations inside it.
```
dense0Weight.getGradient();
```
### Collecting Parameters from Nested Blocks
Let us see how the parameter naming conventions work
if we nest multiple blocks inside each other.
For that we first define a function that produces Blocks
(a Block factory, so to speak) and then
combine these inside yet larger Blocks.
```
public SequentialBlock block1() {
SequentialBlock net = new SequentialBlock();
net.add(Linear.builder().setUnits(32).build());
net.add(Activation.reluBlock());
net.add(Linear.builder().setUnits(16).build());
net.add(Activation.reluBlock());
return net;
}
public SequentialBlock block2() {
SequentialBlock net = new SequentialBlock();
for (int i = 0; i < 4; i++) {
net.add(block1());
}
return net;
}
SequentialBlock rgnet = new SequentialBlock();
rgnet.add(block2());
rgnet.add(Linear.builder().setUnits(10).build());
rgnet.setInitializer(new NormalInitializer());
rgnet.initialize(manager, DataType.FLOAT32, x.getShape());
Model model = Model.newInstance("rgnet");
model.setBlock(rgnet);
Predictor<NDList, NDList> predictor = model.newPredictor(new NoopTranslator());
predictor.predict(new NDList(x)).singletonOrThrow();
```
Now that we have designed the network,
let us see how it is organized.
We can get the list of named parameters by calling `getParameters()`.
However, we not only want to see the parameters, but also how
our network is structured.
To see our network architecture, we can simply print out the block whose architecture we want to see.
```
/* Network Architecture for RgNet */
rgnet
/* Parameters for RgNet */
for (var param : rgnet.getParameters()) {
System.out.println(param.getValue().getArray());
}
```
Since the layers are hierarchically nested,
we can also access them by calling their `getChildren()` method
to get a `BlockList` (also an extension of `PairList`) of their inner blocks.
It shares methods with `ParameterList` and as such we can use their
familiar structure to access the blocks. We can call `get(i)` to get the
`Pair<String, Block>` at the index `i` we want, and then finally `getValue()` to get the actual
block. We can do this in one step, as shown above, with `valueAt(i)`. Then we have to repeat that to get that block's child, and so on.
Here, we access the first major block,
within it the second subblock,
and within that the bias of the first layer,
as follows:
```
Block majorBlock1 = rgnet.getChildren().get(0).getValue();
Block subBlock2 = majorBlock1.getChildren().valueAt(1);
Block linearLayer1 = subBlock2.getChildren().valueAt(0);
NDArray bias = linearLayer1.getParameters().valueAt(1).getArray();
bias
```
## Parameter Initialization
Now that we know how to access the parameters,
let us look at how to initialize them properly.
We discussed the need for initialization in :numref:`sec_numerical_stability`.
By default, DJL initializes weight matrices
based on your set initializer
and the bias parameters are all set to $0$.
However, we will often want to initialize our weights
according to various other protocols.
DJL's `ai.djl.training.initializer` package provides a variety
of preset initialization methods.
If we want to create a custom initializer,
we need to do some extra work.
### Built-in Initialization
In DJL, when setting the initializer for blocks, the default `setInitializer()` function does not overwrite
any previously set initializers. So if you set an initializer earlier, but decide you want to change your initializer and call `setInitializer()` again, the second `setInitializer()` will NOT overwrite your first one.
Additionally, when you call `setInitializer()` on a block, all internal blocks will also call `setInitializer()` with the same given `initializer`.
This means that we can call `setInitializer()` on the highest level of a block and know that all internal blocks that do not have an initializer already set will be set to that given `initializer`.
This setup has the advantage that we don't have to worry about our `setInitializer()` overriding our previous `initializer`s on internal blocks!
If you want to however, you can explicitly set an initializer for a `Parameter` by calling its `setInitializer()` function directly and passing in `true` to the overwrite input.
Simply loop over all the parameters returned from `getParameters()` and set their initializers directly!
Let us begin by calling on built-in initializers.
The code below initializes all parameters
to a given constant value 1,
by using the `ConstantInitializer()` initializer.
Note that this will not do anything currently since we have already set
our initializer in the previous code block.
We can verify this by checking the weight of a parameter.
```
net.setInitializer(new ConstantInitializer(1));
net.initialize(manager, DataType.FLOAT32, x.getShape());
Block linearLayer = net.getChildren().get(0).getValue();
NDArray weight = linearLayer.getParameters().get(0).getValue().getArray();
weight
```
We can see these initializations however if we create a new network.
Let us write a function to create these network architectures for us
conveniently.
```
public SequentialBlock getNet() {
SequentialBlock net = new SequentialBlock();
net.add(Linear.builder().setUnits(8).build());
net.add(Activation.reluBlock());
net.add(Linear.builder().setUnits(1).build());
return net;
}
```
If we run our previous initializer on this new net and check a parameter, we'll
see that everything is initialized properly! (to 7777!)
```
SequentialBlock net = getNet();
net.setInitializer(new ConstantInitializer(7777));
net.initialize(manager, DataType.FLOAT32, x.getShape());
Block linearLayer = net.getChildren().valueAt(0);
NDArray weight = linearLayer.getParameters().valueAt(0).getArray();
weight
```
We can also initialize all parameters
as Gaussian random variables
with standard deviation $.01$.
```
SequentialBlock net = getNet();
net.setInitializer(new NormalInitializer());
net.initialize(manager, DataType.FLOAT32, x.getShape());
Block linearLayer = net.getChildren().valueAt(0);
NDArray weight = linearLayer.getParameters().valueAt(0).getArray();
weight
```
We can also apply different initializers for certain Blocks.
For example, below we initialize the first layer
with the `Xavier` initializer
and initialize the second layer
to a constant value of 0.
We will do this without the `getNet()` function as it will be easier
to have the reference to each block we want to set.
```
SequentialBlock net = new SequentialBlock();
Linear linear1 = Linear.builder().setUnits(8).build();
net.add(linear1);
net.add(Activation.reluBlock());
Linear linear2 = Linear.builder().setUnits(1).build();
net.add(linear2);
linear1.setInitializer(new XavierInitializer());
linear1.initialize(manager, DataType.FLOAT32, x.getShape());
linear2.setInitializer(Initializer.ZEROS);
linear2.initialize(manager, DataType.FLOAT32, x.getShape());
System.out.println(linear1.getParameters().valueAt(0).getArray());
System.out.println(linear2.getParameters().valueAt(0).getArray());
```
Finally, we can loop over the `ParameterList` and set their initializers individually.
When setting initializers directly on the `Parameter`, you must pass in an `overwrite`
boolean along with the initializer to declare whether you want your current
initializer to overwrite the previous initializer if one has already been set.
Here, we do want to overwrite and so pass in `true`.
For this example, however, since we haven't set the `weight` initializers before, there is no initializer to overwrite so we could pass in `false` and still have the same outcome.
However, since `bias` parameters are automatically set to initialize at 0, to properly set our initializer here, we have to set overwrite to `true`.
```
SequentialBlock net = getNet();
ParameterList params = net.getParameters();
for (int i = 0; i < params.size(); i++) {
// Here we interleave initializers.
// We initialize parameters at even indexes to 0
// and parameters at odd indexes to 2.
Parameter param = params.valueAt(i);
if (i % 2 == 0) {
// All weight parameters happen to be at even indices.
// We set them to initialize to 0.
// There is no need to overwrite
// since no initializer has been set for them previously.
param.setInitializer(new ConstantInitializer(0), false);
}
else {
// All bias parameters happen to be at odd indices.
// We set them to initialize to 2.
// To set the initializer here properly, we must pass in true
// for overwrite
// since bias parameters automatically have their
// initializer set to 0.
param.setInitializer(new ConstantInitializer(2), true);
}
}
net.initialize(manager, DataType.FLOAT32, x.getShape());
for (var param : net.getParameters()) {
System.out.println(param.getKey());
System.out.println(param.getValue().getArray());
}
```
### Custom Initialization
Sometimes, the initialization methods we need
are not standard in DJL.
In these cases, we can define a class to implement the `Initializer` interface.
We only have to implement the `initialize()` function,
which takes an `NDManager`, a `Shape`, and the `DataType`.
We then create the `NDArray` with the aforementioned `Shape` and `DataType`
and initialize it to what we want! You can also design your
initializer to take in some parameters. Simply declare them
as fields in the class and pass them in as inputs to the constructor!
In the example below, we define an initializer
for the following strange distribution:
$$
\begin{aligned}
w \sim \begin{cases}
U[5, 10] & \text{ with probability } \frac{1}{4} \\
0 & \text{ with probability } \frac{1}{2} \\
U[-10, -5] & \text{ with probability } \frac{1}{4}
\end{cases}
\end{aligned}
$$
```
class MyInit implements Initializer {
public MyInit() {}
@Override
public NDArray initialize(NDManager manager, Shape shape, DataType dataType) {
System.out.printf("Init %s\n", shape.toString());
// Here we generate data points
// from a uniform distribution [-10, 10]
NDArray data = manager.randomUniform(-10, 10, shape, dataType);
// We keep the data points whose absolute value is >= 5
// and set the others to 0.
// This generates the distribution `w` shown above.
NDArray absGte5 = data.abs().gte(5); // returns boolean NDArray where
// true indicates abs >= 5 and
// false otherwise
return data.mul(absGte5); // keeps true indices and sets false indices to 0.
// special operation when multiplying a numerical
// NDArray with a boolean NDArray
}
}
SequentialBlock net = getNet();
net.setInitializer(new MyInit());
net.initialize(manager, DataType.FLOAT32, x.getShape());
Block linearLayer = net.getChildren().valueAt(0);
NDArray weight = linearLayer.getParameters().valueAt(0).getArray();
weight
```
Note that we always have the option
of setting parameters directly by calling `getValue().getArray()`
to access the underlying `NDArray`.
A note for advanced users:
you cannot directly modify parameters within a `GarbageCollector` scope.
You must modify them outside the `GarbageCollector` scope to avoid confusing
the automatic differentiation mechanics.
```
// methods ending in 'i' (e.g. addi, divi) are in-place operations that modify the original NDArray
NDArray weightLayer = net.getChildren().valueAt(0)
.getParameters().valueAt(0).getArray();
weightLayer.addi(7);
weightLayer.divi(9);
weightLayer.set(new NDIndex(0, 0), 2020); // set the (0, 0) index to 2020
weightLayer;
```
## Tied Parameters
Often, we want to share parameters across multiple layers.
Later we will see that when learning word embeddings,
it might be sensible to use the same parameters
both for encoding and decoding words.
We discussed one such case when we introduced :numref:`sec_model_construction`.
Let us see how to do this a bit more elegantly.
In the following we allocate a dense layer
and then use its parameters specifically
to set those of another layer.
```
SequentialBlock net = new SequentialBlock();
// We keep a reference to the shared block
// so that we can add it twice and later check that its parameters are reused
Block shared = Linear.builder().setUnits(8).build();
SequentialBlock sharedRelu = new SequentialBlock();
sharedRelu.add(shared);
sharedRelu.add(Activation.reluBlock());
net.add(Linear.builder().setUnits(8).build());
net.add(Activation.reluBlock());
net.add(sharedRelu);
net.add(sharedRelu);
net.add(Linear.builder().setUnits(10).build());
NDArray x = manager.randomUniform(-10f, 10f, new Shape(2, 20), DataType.FLOAT32);
net.setInitializer(new NormalInitializer());
net.initialize(manager, DataType.FLOAT32, x.getShape());
model.setBlock(net);
Predictor<NDList, NDList> predictor = model.newPredictor(new NoopTranslator());
System.out.println(predictor.predict(new NDList(x)).singletonOrThrow());
// Check that the parameters are the same
NDArray shared1 = net.getChildren().valueAt(2)
.getParameters().valueAt(0).getArray();
NDArray shared2 = net.getChildren().valueAt(3)
.getParameters().valueAt(0).getArray();
shared1.eq(shared2);
```
This example shows that the parameters
of the second and third layer are tied.
They are not just equal, they are
represented by the same exact `NDArray`.
Thus, if we change one of the parameters,
the other one changes, too.
You might wonder,
*when parameters are tied
what happens to the gradients?*
Since the model parameters contain gradients,
the gradients of the second hidden layer
and the third hidden layer are added together
in `shared.getGradient()` during backpropagation.
## Summary
* We have several ways to access, initialize, and tie model parameters.
* We can use custom initialization.
* DJL has a sophisticated mechanism for accessing parameters in a unique and hierarchical manner.
## Exercises
1. Use the FancyMLP defined in :numref:`sec_model_construction` and access the parameters of the various layers.
1. Look at the [DJL documentation](https://javadoc.io/doc/ai.djl/api/latest/ai/djl/training/initializer/Initializer.html) and explore different initializers.
1. Try accessing the model parameters after `net.initialize()` and before `predictor.predict(x)` to observe the shape of the model parameters. What changes? Why?
1. Construct a multilayer perceptron containing a shared parameter layer and train it. During the training process, observe the model parameters and gradients of each layer.
1. Why is sharing parameters a good idea?
| github_jupyter |
```
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
import segmentation_models as sm
import tensorflow as tf
from pycocotools.coco import COCO
from pathlib import Path
import numpy as np
from typing import Final
import plotly.express as px
from matplotlib import pyplot as plt
import cv2
from albumentations import Compose, VerticalFlip, HorizontalFlip, ShiftScaleRotate, RandomSizedCrop
image_size:Final[int] = 256
annotation_file = Path("data/annotations/instances_default.json")
coco = COCO(str(annotation_file))
for i in coco.getCatIds():
if coco.loadCats(i)[0]["name"] == "Crack":
cat_id = coco.loadCats(i)[0]["id"]
def create_mask(anns, cat_id):
mask = np.zeros((image_size, image_size, 1))
for i in range(len(anns)):
if anns[i]["category_id"] == cat_id:
cv2.fillPoly(mask, pts=[np.array(anns[i]["segmentation"], dtype=np.int32).reshape(-1,1, 2)], color=(1))
return mask
def read_data(idx, cat_id):
anns = coco.loadAnns(coco.getAnnIds(idx))
mask = create_mask(anns, cat_id)
image = np.array(tf.keras.preprocessing.image.load_img(str(Path("data/images/annotated") / coco.loadImgs(idx)[0]["file_name"]))) / 255
return image, mask
def file_load_generator(cat_id, coco, split=15, is_train=True):
transform = Compose([VerticalFlip(), HorizontalFlip(), ShiftScaleRotate()])
ids = coco.getImgIds()[:split] if is_train else coco.getImgIds()[split:]
for idx in ids:
image, mask = read_data(idx, cat_id)
ret = transform(image=image, mask=mask)
yield ret["image"], ret["mask"]
ds = tf.data.Dataset.from_generator(lambda: file_load_generator(cat_id, coco), (tf.float32, tf.float32)).batch(5)
for image, label in ds:
img = image[0]
lbl = label[0]
image, label = read_data(15, cat_id)
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title("image",fontsize=20)
plt.imshow(image)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_title("label",fontsize=20)
plt.imshow(label)
def non_clack_generator(split=180, is_train=True):
transform = Compose([VerticalFlip(), HorizontalFlip(), ShiftScaleRotate()])
files = [f for f in Path("data/images/non_clack").glob("*")]
files = files[:split] if is_train else files[split:]
for file in files:
image = np.expand_dims(tf.keras.preprocessing.image.load_img(file),axis=0)
ret = transform(image=image)
yield ret["image"], ret["image"]
train_ds = tf.data.Dataset.from_generator(lambda: file_load_generator(cat_id, coco), (tf.float32, tf.float32)).batch(5)
test_ds = tf.data.Dataset.from_generator(lambda: file_load_generator(cat_id, coco, is_train=False), (tf.float32, tf.float32))
non_clack_images = [np.expand_dims(tf.keras.preprocessing.image.load_img(p),axis=0) for p in Path("data/images/non_clack").glob("*")]
non_clack_images = np.concatenate(non_clack_images) / 255
recon_train_ds = tf.data.Dataset.from_tensor_slices((non_clack_images[:180,:,:,:], non_clack_images[:180,:,:,:])).batch(8)
recon_test_ds = tf.data.Dataset.from_tensor_slices((non_clack_images[180:,:,:,:], non_clack_images[180:,:,:,:]))
#recon_train_ds = tf.data.Dataset.from_generator(lambda: non_clack_generator(), (tf.float32, tf.float32)).batch(8)
#recon_test_ds = tf.data.Dataset.from_generator(lambda: non_clack_generator(is_train=False), (tf.float32, tf.float32))
try:
segment_model = tf.keras.models.load_model("segmentation.hf5")
except:
segment_model = sm.Unet(input_shape=(256, 256, 3))
class ClackAnnotNet(tf.keras.Model):
def __init__(self):
super(ClackAnnotNet, self).__init__()
self.model = sm.Unet(input_shape=(256, 256, 3), classes=3)
self.model = tf.keras.Model(inputs=self.model.input, outputs=self.model.get_layer("final_conv").output)
self.model = tf.keras.Sequential([self.model, tf.keras.layers.Activation("sigmoid", name="Sigmoid")])
def call(self, inputs):
return self.model(inputs)
try:
model = tf.keras.models.load_model("reconstruct.hf5")
except:
model = ClackAnnotNet()
```
## Reconstruction Model
```
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=100)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss="mse")
model.fit(recon_train_ds, epochs=1000, validation_data=recon_test_ds, callbacks=[callback])
```
## Segmentation Model
```
#callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=30)
#segment_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss="binary_crossentropy",metrics=["binary_crossentropy"])
segment_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3), loss="mae",metrics=["mae"])
segment_model.fit(train_ds, epochs=1000)
model.save("reconstruct")
segment_model.save("segmentation")
```
## Visualization
```
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title("pred",fontsize=20)
plt.imshow(model(tf.expand_dims(image, axis=0))[0])
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_title("image",fontsize=20)
plt.imshow(image)
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title("pred",fontsize=20)
plt.imshow(segment_model(tf.expand_dims(image, axis=0))[0])
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_title("label",fontsize=20)
plt.imshow(label)
t=segment_model(tf.expand_dims(image, axis=0))[0].numpy()
#tf.reduce_sum(tf.where(tf.greater(t,), t, tf.zeros_like(t)))
len(np.where(t>0.1)[0])
nc_image = non_clack_images[180]
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title("pred",fontsize=20)
plt.imshow(nc_image)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_title("label",fontsize=20)
plt.imshow((tf.keras.losses.MSE(model(np.expand_dims(nc_image, axis=0)), nc_image).numpy()[0] > 0.01) * 255)
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_title("pred",fontsize=20)
plt.imshow(image)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_title("label",fontsize=20)
plt.imshow((tf.keras.losses.MSE(model(np.expand_dims(image, axis=0)), image).numpy()[0] > 0.01) * 255)
```
## Feature Space Separation
```
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import pandas as pd
feature_model = model.get_layer("sequential_1").get_layer("model_12")
feature_model = tf.keras.Model(inputs=feature_model.input, outputs=feature_model.get_layer("block3_pool").output)
# feature extraction model
test = train_ds.unbatch()
#a_x = [tf.keras.losses.MAE(model(np.expand_dims(x, axis=0)), x).numpy().flatten() for x, y in test]
a_x = [feature_model(np.expand_dims(x, axis=0)).numpy().flatten() for x, y in test]
a_y = [0] * len(a_x)
a_c = ["ひびあり"] * len(a_x)  # "ひびあり" = "cracked"
a_n = []
for idx in coco.getImgIds()[:15]:
anns = coco.loadAnns(coco.getAnnIds(idx))
a_n.append(coco.loadImgs(idx)[0]["file_name"])
unb_test = recon_train_ds.unbatch()
#b_x = [tf.keras.losses.MAE(model(np.expand_dims(x, axis=0)), x).numpy().flatten() for x, y in unb_test]
b_x = [feature_model(np.expand_dims(x, axis=0)).numpy().flatten() for x, y in unb_test]
b_y = [1] * len(b_x)
b_c = ["ひびなし"] * len(b_x)  # "ひびなし" = "not cracked"
b_n = [str(p.name) for p in Path("./data/images/non_clack").glob("*")]
a_x.extend(b_x)
a_y.extend(b_y)
a_c.extend(b_c)
a_n.extend(b_n[:180])
feature = TSNE(n_components=2).fit_transform(np.array(a_x))
print(len(a_x), len(a_c), len(a_n))
plot_df = pd.DataFrame({"f1":feature[:, 0],"f2":feature[:, 1],"color":a_c, "name":a_n})
#plt.scatter(feature[:, 0], feature[:, 1], alpha=0.8, color=a_c)
fig = px.scatter(plot_df, x="f1", y="f2", color="color", hover_name="name" )
fig.update_layout(width=800, height=600)
#result = tf.concat([tf.concat([model(image) for image, _, in test_ds], axis=0), tf.concat([model(image) for image, _, in recon_test_ds], axis=0)], axis=0)
#score = [len(np.where(t.numpy()>0.1)[0]) for t in result]
#score
counts = []
for image,_ in test_ds:
result = (tf.keras.losses.MSE(model(np.expand_dims(image, axis=0)), image).numpy()[0] > 0.001) * 255
counts.append(len(np.where(result==255)[0]))
for image,_ in recon_test_ds:
result = (tf.keras.losses.MSE(model(np.expand_dims(image, axis=0)), image).numpy()[0] > 0.001) * 255
counts.append(len(np.where(result==255)[0]))
labels = [1]*5
labels.extend([0]*20)
from sklearn.metrics import roc_auc_score, roc_curve
fpr, tpr, th = roc_curve(labels, counts, drop_intermediate=False)
fpr, tpr, th
roc_auc_score(labels, counts)
plt.plot(fpr, tpr, marker='o')
```
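The cell above reports the ROC curve and AUC for the pixel-count anomaly score. As a rough illustration (not part of the original analysis), one common way to turn that curve into an operating threshold is Youden's J statistic; the sketch below assumes the `fpr`, `tpr`, and `th` arrays from the previous cell are still in scope.
```
# Illustrative only: pick the threshold that maximizes Youden's J = TPR - FPR
j = tpr - fpr
best = int(np.argmax(j))
print("candidate pixel-count threshold:", th[best])
print("TPR:", tpr[best], "FPR:", fpr[best])
```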
| github_jupyter |
```
import sys
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from numpy import linalg as LA
infile = os.listdir('/users/timeifler/Dropbox/cosmolike_store/LSST_emu/cov/')
data = [x[4:29] for x in infile]
data= [i.replace('LSST_Y10','LSST_3x2pt_Y10') for i in data]
outname= [i.replace('cov_','') for i in infile]
for k in range(0,36):
print "------- NEW COV ----------"
print "/users/timeifler/Dropbox/cosmolike_store/LSST_emu/cov/"+infile[k]
print "------- NEW COV ----------"
datafile= np.genfromtxt("datav/"+data[k])
ndata=datafile.shape[0]
mask = np.zeros(ndata)
for i in range(0,datafile.shape[0]):
if (datafile[i,1] >1.0e-15):
mask[i]=1.0
covfile = np.genfromtxt("/users/timeifler/Dropbox/cosmolike_store/LSST_emu/cov/"+infile[k])
cov = np.ones((ndata,ndata))
    print(ndata, int(np.max(covfile[:,0])+1))
for i in range(0,covfile.shape[0]):
cov[int(covfile[i,0]),int(covfile[i,1])] = covfile[i,8]+covfile[i,9]
cov[int(covfile[i,1]),int(covfile[i,0])] = covfile[i,8]+covfile[i,9]
if 1. in cov[:, :]:
print "Covariance assembly incomplete - covparallel file(s) missing"
else:
print "Covariance assembly complete - all covparallel files present"
numpyfile="/users/timeifler/Dropbox/cosmolike_store/LSST_emu/npcov/npcov_"+outname[k]
np.save(numpyfile, cov)
loadfile="/users/timeifler/Dropbox/cosmolike_store/LSST_emu/npcov/npcov_"+outname[k]+".npy"
cov2=np.load(loadfile)
    print(cov.shape, cov2.shape)
# cor = np.zeros((ndata,ndata))
# for i in range(0,ndata):
# for j in range(0,ndata):
# if (cov[i,i]*cov[j,j] >0):
# cor[i,j] = cov[i,j]/math.sqrt(cov[i,i]*cov[j,j])
# a = np.sort(LA.eigvals(cor[:,:]))
# print "min+max eigenvalues full cor:"
# print np.min(a), np.max(a)
# print "neg eigenvalues full cor:"
# for i in range(0,a.shape[0]):
# if (a[i]< 0.0): print a[i]
# inv = LA.inv(cov[0:ndata,0:ndata])
# a = np.sort(LA.eigvals(cov[0:ndata,0:ndata]))
# print "min+max eigenvalues 3x2 cov:"
# print np.min(a), np.max(a)
# outfile = "/users/timeifler/Dropbox/cosmolike_store/LSST_emu/inv/inv_"+outname[k]
# f = open(outfile, "w")
# for i in range(0,ndata):
# inv[i,i]=inv[i,i]*mask[i]
# for j in range(0,ndata):
# f.write("%d %d %e\n" %(i,j, inv[i,j]))
# f.close()
# maskindices=np.where(mask == 0)[0]
# covnew=np.delete(cov, maskindices, 0)
# covcut=np.delete(covnew, maskindices, 1)
# covzero=np.where(covcut == 0)[0]
# plt.figure()
# plt.imshow(np.log10(np.abs(covcut[:,:])), interpolation="nearest",vmin=-25, vmax=-10)
# plt.colorbar()
# savefile="/users/timeifler/Dropbox/cosmolike_store/LSST_emu/plots/covcut_"+outname[k]+".png"
# plt.savefig(savefile, format='png', dpi=2000)
```
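As a quick check against silent assembly errors, the sketch below (illustrative only; it assumes the `npcov` files written above exist on disk and that `outname` is still in scope) reloads one saved covariance and verifies that it is symmetric, which filling both `cov[i,j]` and `cov[j,i]` should guarantee.
```
cov_check = np.load("/users/timeifler/Dropbox/cosmolike_store/LSST_emu/npcov/npcov_" + outname[0] + ".npy")
print(np.allclose(cov_check, cov_check.T))  # True if the assembled matrix is symmetric
```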
| github_jupyter |
<a href="https://colab.research.google.com/github/RenqinSS/Rec/blob/main/algo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import random
import os
import numpy as np
import torch
SEED = 45
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(SEED)
!git clone https://github.com/dpoqb/wechat_big_data_baseline_pytorch.git
!dir
!mkdir data
!unzip ./drive/MyDrive/wechat_algo_data1.zip -d ./data
!pip install deepctr_torch
import torch
import os
print(torch.cuda.is_available())
for i in range(torch.cuda.device_count()):
print(torch.cuda.get_device_name(i))
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook as tqdm
from sklearn.decomposition import PCA
from collections import defaultdict
import os
os.chdir('/content/wechat_big_data_baseline_pytorch')
# Root directory where the data is stored
ROOT_PATH = "../data"
# Path to the competition dataset
DATASET_PATH = ROOT_PATH + '/wechat_algo_data1/'
# Training set
USER_ACTION = DATASET_PATH + "user_action.csv"
FEED_INFO = DATASET_PATH + "feed_info.csv"
FEED_EMBEDDINGS = DATASET_PATH + "feed_embeddings.csv"
# Test set
TEST_FILE = DATASET_PATH + "test_a.csv"
# Actions to be predicted in the preliminary round
ACTION_LIST = ["read_comment", "like", "click_avatar", "forward"]
FEA_COLUMN_LIST = ["read_comment", "like", "click_avatar", "forward", "comment", "follow", "favorite", "device"]
FEA_FEED_LIST = ['feedid', 'authorid', 'videoplayseconds', 'bgm_song_id', 'bgm_singer_id', 'manual_tag_list']
# Negative-sample downsampling ratio (negative : positive)
ACTION_SAMPLE_RATE = {"read_comment": 4, "like": 4, "click_avatar": 4, "forward": 10, "comment": 10, "follow": 10, "favorite": 10}
def process_embed(train):
feed_embed_array = np.zeros((train.shape[0], 512))
for i in tqdm(range(train.shape[0])):
x = train.loc[i, 'feed_embedding']
        if pd.notna(x) and x != '':  # guard against missing or empty embedding strings
y = [float(i) for i in str(x).strip().split(" ")]
else:
y = np.zeros((512,)).tolist()
feed_embed_array[i] += y
temp = pd.DataFrame(columns=[f"embed{i}" for i in range(512)], data=feed_embed_array)
train = pd.concat((train, temp), axis=1)
return train
def proc_tag(df, name='manual_tag_list', thre=5, max_len=5):
stat = defaultdict(int)
for row in df[name]:
if isinstance(row, str):
for tag in row.strip().split(';'):
stat[tag] += 1
    zero_tags = set([tag for tag in stat if stat[tag] < thre])  # tags whose frequency is below the threshold
def tag_func(row, max_len=max_len):
ret = []
if isinstance(row, str):
for tag in row.strip().split(';'):
ret.append(0 if tag in zero_tags else int(tag) + 1)
ret = ret[:max_len] + [0] * (max_len - len(ret))
return ' '.join([str(n) for n in ret])
df[name] = df[name].apply(tag_func)
tag_vocab_size = max([int(tag) for tag in stat]) + 2
print('%s: vocab_size == %d' % (name, tag_vocab_size))
return df
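# A small illustration of proc_tag on toy data (not the real feed_info file):
# tags rarer than `thre` map to 0, kept tags are shifted by +1, and every row
# is padded / truncated to `max_len` ids.
_toy = pd.DataFrame({"manual_tag_list": ["3;7;7", "7;12", np.nan]})
_toy = proc_tag(_toy, name="manual_tag_list", thre=2, max_len=3)
print(_toy["manual_tag_list"].tolist())  # expected: ['0 8 8', '8 0 0', '0 0 0']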
def prepare_data():
feed_info_df = pd.read_csv(FEED_INFO)
feed_info_df = proc_tag(feed_info_df, name='manual_tag_list', thre=5, max_len=5)
user_action_df = pd.read_csv(USER_ACTION)[["userid", "date_", "feedid",] + FEA_COLUMN_LIST]
feed_info_df = feed_info_df[FEA_FEED_LIST]
test = pd.read_csv(TEST_FILE)
# add feed feature
train = pd.merge(user_action_df, feed_info_df, on='feedid', how='left')
test = pd.merge(test, feed_info_df, on='feedid', how='left')
test["videoplayseconds"] = np.log(test["videoplayseconds"] + 1.0)
test.to_csv(ROOT_PATH + f'/test_data.csv', index=False)
for action in tqdm(ACTION_LIST):
print(f"prepare data for {action}")
tmp = train.drop_duplicates(['userid', 'feedid', action], keep='last')
df_neg = tmp[tmp[action] == 0]
df_neg = df_neg.sample(frac=1.0 / ACTION_SAMPLE_RATE[action], random_state=SEED, replace=False)
df_all = pd.concat([df_neg, tmp[tmp[action] == 1]])
df_all["videoplayseconds"] = np.log(df_all["videoplayseconds"] + 1.0)
df_all.to_csv(ROOT_PATH + f'/train_data_for_{action}.csv', index=False)
if __name__ == "__main__":
prepare_data()
from sklearn.decomposition import PCA
n_dim = 32
feed_embed = pd.read_csv(FEED_EMBEDDINGS)
feed_embed['feed_embedding'] = feed_embed['feed_embedding'].apply(lambda row: [float(x) for x in row.strip().split()])
pca = PCA(n_components=n_dim)
pca_emb = pca.fit_transform(feed_embed['feed_embedding'].tolist())
feed_embed['pca_emb'] = list(pca_emb)
feed_embed = feed_embed[['feedid', 'pca_emb']]
# feed_embed.drop(['feed_embedding'], axis=1).to_csv("/content/drive/MyDrive/pca_emb%d.csv" % n_dim, index=False)
from numba import njit
from scipy.stats import rankdata
@njit
def _auc(actual, pred_ranks):
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(pred_ranks[actual == 1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)
def fast_auc(actual, predicted):
# https://www.kaggle.com/c/riiid-test-answer-prediction/discussion/208031
pred_ranks = rankdata(predicted)
return _auc(actual, pred_ranks)
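# Sanity check of fast_auc against scikit-learn on toy arrays (illustrative only;
# assumes scikit-learn is available in this environment).
from sklearn.metrics import roc_auc_score as _sk_roc_auc
_y = np.array([0, 0, 1, 1])
_p = np.array([0.1, 0.4, 0.35, 0.8])
assert abs(fast_auc(_y, _p) - _sk_roc_auc(_y, _p)) < 1e-9  # both give 0.75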
def uAUC(labels, preds, user_id_list):
user_pred = defaultdict(lambda: [])
user_truth = defaultdict(lambda: [])
for idx, truth in enumerate(labels):
user_id = user_id_list[idx]
pred = preds[idx]
truth = labels[idx]
user_pred[user_id].append(pred)
user_truth[user_id].append(truth)
user_flag = defaultdict(lambda: False)
for user_id in set(user_id_list):
truths = user_truth[user_id]
flag = False
        # if a user's labels are all positive or all negative, flag stays False
for i in range(len(truths) - 1):
if truths[i] != truths[i + 1]:
flag = True
break
user_flag[user_id] = flag
total_auc = 0.0
size = 0.0
for user_id in user_flag:
if user_flag[user_id]:
auc = fast_auc(np.asarray(user_truth[user_id]), np.asarray(user_pred[user_id]))
total_auc += auc
size += 1.0
user_auc = float(total_auc)/size
return user_auc
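# Tiny illustration of uAUC with made-up values: user "a" has both classes, so the
# per-user AUC (1.0 here) counts; user "b" has only positive labels and is skipped.
_labels = [0, 1, 1, 1]
_preds = [0.1, 0.9, 0.8, 0.7]
_users = ["a", "a", "b", "b"]
print(uAUC(_labels, _preds, _users))  # expected 1.0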
def compute_weighted_score(score_dict, weight_dict):
score = 0.0
weight_sum = 0.0
for action in score_dict:
weight = float(weight_dict[action])
score += weight*score_dict[action]
weight_sum += weight
score /= float(weight_sum)
score = round(score, 6)
return score
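# Worked example of compute_weighted_score with toy uAUC values (not real results):
# (4*0.62 + 3*0.61 + 2*0.71 + 1*0.69) / 10 = 0.642
_toy_scores = {"read_comment": 0.62, "like": 0.61, "click_avatar": 0.71, "forward": 0.69}
_toy_weights = {"read_comment": 4.0, "like": 3.0, "click_avatar": 2.0, "forward": 1.0}
assert abs(compute_weighted_score(_toy_scores, _toy_weights) - 0.642) < 1e-9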
sparse_2_dim = {
'userid': 8,
'feedid': 8,
'authorid': 8,
'bgm_song_id': 8,
'bgm_singer_id': 8,
}
dense_2_dim = {
'videoplayseconds': 1,
'pca_emb': 32,
#'w2v': 8 * 3
}
var_2_dim = {
'manual_tag_list': {'dim': 8, 'vocab_size': 354},
}
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from collections import defaultdict
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
from deepctr_torch.models.deepfm import *
from deepctr_torch.models.basemodel import *
class MyBaseModel(BaseModel):
def fit(self, x, y, batch_size, val_data=None, epochs=1, verbose=1, mode='offline'):
x = [x[feature] for feature in self.feature_index] # type(x) = dict
for i in range(len(x)):
x[i] = np.array(x[i].tolist())
if len(x[i].shape) == 1:
x[i] = np.expand_dims(x[i], axis=1)
val_x, val_y = [], []
if mode == 'offline':
val_x, val_y = val_data
val_uids = val_x['userid'].tolist()
val_x = [val_x[feature] for feature in self.feature_index]
train_tensor_data = Data.TensorDataset(torch.from_numpy(np.concatenate(x, axis=-1)), torch.from_numpy(y))
train_loader = DataLoader(dataset=train_tensor_data, shuffle=True, batch_size=batch_size)
sample_num = len(train_tensor_data)
steps_per_epoch = (sample_num - 1) // batch_size + 1
# Train
print("Train on {0} samples, validate on {1} samples, {2} steps per epoch".format(len(train_tensor_data), len(val_y), steps_per_epoch))
epoch_logs = defaultdict(dict)
model = self.train()
for epoch in range(epochs):
start_time = time.time()
loss_epoch = 0
total_loss_epoch = 0
train_result = defaultdict(list)
for _, (x_train, y_train) in tqdm(enumerate(train_loader)):
x = x_train.to(self.device).float()
y = y_train.to(self.device).float()
y_pred = model(x).squeeze()
self.optim.zero_grad()
loss = self.loss_func(y_pred, y.squeeze(), reduction='sum')
total_loss = loss + self.get_regularization_loss() + self.aux_loss
loss_epoch += loss.item()
total_loss_epoch += total_loss.item()
total_loss.backward()
self.optim.step()
for name, func in self.metrics.items():
try:
temp = func(y.cpu().data.numpy(), y_pred.cpu().data.numpy().astype("float64"))
except:
temp = 0
finally:
train_result[name].append(temp)
# Add logs
logs = {}
logs["loss"] = total_loss_epoch / sample_num
for name, result in train_result.items():
logs[name] = np.sum(result) / steps_per_epoch
if mode == 'offline':
eval_result = self.evaluate(val_x, val_y, val_uids, batch_size)
for name, result in eval_result.items():
logs["val_" + name] = result
print('Epoch {0}/{1}, {2}s'.format(epoch + 1, epochs, int(time.time() - start_time)))
eval_str = "loss: {0: .4f}".format(logs["loss"])
for name in logs:
eval_str += " - " + name + ": {0: .4f}".format(logs[name])
print(eval_str)
epoch_logs[epoch+1] = logs
return epoch_logs
def evaluate(self, x, y, uids, batch_size=256):
preds = self.predict(x, batch_size)
eval_result = {}
for name, metric_fun in self.metrics.items():
eval_result[name] = metric_fun(y, preds)
eval_result['uAUC'] = uAUC(y.squeeze(), preds.squeeze(), uids)
return eval_result
def predict(self, x, batch_size=256):
model = self.eval()
if isinstance(x, dict):
x = [x[feature] for feature in self.feature_index]
for i in range(len(x)):
x[i] = np.array(x[i].tolist())
if len(x[i].shape) == 1:
x[i] = np.expand_dims(x[i], axis=1)
tensor_data = Data.TensorDataset(torch.from_numpy(np.concatenate(x, axis=-1)))
test_loader = DataLoader(dataset=tensor_data, shuffle=False, batch_size=batch_size)
pred_ans = []
with torch.no_grad():
for _, x_test in enumerate(test_loader):
x = x_test[0].to(self.device).float()
y_pred = model(x).cpu().data.numpy()
pred_ans.append(y_pred)
return np.concatenate(pred_ans).astype("float64")
class MyDeepFM(MyBaseModel):
def __init__(self,
linear_feature_columns, dnn_feature_columns,
dense_map = None, dnn_hidden_units=(256, 128),
l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, init_std=0.0001, seed=1024,
dnn_dropout=0., dnn_activation='relu', dnn_use_bn=True, task='binary', device='cpu'):
super(MyDeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear,
l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task,
device=device)
        # dense map (reset to empty here, so the optional dense-dimension mapping below is effectively disabled)
        dense_map = {}
self.dense_map = dense_map
self.dense_map_dict = dict([(name, nn.Linear(dense_2_dim[name], dense_map[name], bias=False).to(device)) for name in dense_map])
dim_delta = sum([dense_map[name] - dense_2_dim[name] for name in dense_map])
# dnn tower
self.dnn = DNN(self.compute_input_dim(dnn_feature_columns) + dim_delta, dnn_hidden_units,
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
init_std=init_std, seed=seed, device=device)
self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
self.add_regularization_weight(filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2=l2_reg_dnn)
self.add_regularization_weight(self.dnn_linear.weight, l2=l2_reg_dnn)
self.to(device)
def forward(self, X):
sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict) # 5*[512,1,4], 1*[512,1]
# lr
logit = self.linear_model(X)
# fm
fm_input = torch.cat(sparse_embedding_list, dim=1)
square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
logit += 0.5 * torch.sum(square_of_sum - sum_of_square, dim=2, keepdim=False)
# dense map
dense_names = [fc.name for fc in self.dnn_feature_columns if isinstance(fc, DenseFeat)]
tmp = []
for name, tensor in zip (dense_names, dense_value_list):
if name in self.dense_map_dict:
tensor = self.dense_map_dict[name](tensor)
tmp.append(tensor)
dense_value_list = tmp
# dnn tower
sparse_dnn_input = torch.flatten(torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
dense_dnn_input = torch.flatten(torch.cat(dense_value_list, dim=-1), start_dim=1)
dnn_input = torch.cat([sparse_dnn_input, dense_dnn_input], dim=-1)
logit += self.dnn_linear(self.dnn(dnn_input))
return self.out(logit)
mode = 'online' # online
if __name__ == "__main__":
submit = pd.read_csv(ROOT_PATH + '/test_data.csv')[['userid', 'feedid']]
logs = {}
for action in ACTION_LIST:
print('*** train for %s ***' % action)
USE_FEAT = ['userid', 'feedid', 'device', action] + FEA_FEED_LIST[1:]
train = pd.read_csv(ROOT_PATH + f'/train_data_for_{action}.csv')[['date_'] + USE_FEAT]
# TODO: sampling
# train = train.sample(frac=0.1, random_state=42).reset_index(drop=True)
print("positive ratio:", sum((train[action] == 1) * 1) / train.shape[0])
test = pd.read_csv(ROOT_PATH + '/test_data.csv')[[i for i in USE_FEAT if i != action]]
test[action] = 0
test['date_'] = 15
test = test[['date_'] + USE_FEAT]
data = pd.concat((train, test)).reset_index(drop=True)
# universal embedding
data = pd.merge(data, feed_embed, on='feedid', how='left')
data['pca_emb'] = [e if isinstance(e, np.ndarray) else np.zeros((32)) for e in data['pca_emb']]
data['manual_tag_list'] = data['manual_tag_list'].apply(lambda row: np.array([int(x) for x in row.split()]))
# features
sparse_features = list(sparse_2_dim.keys())
dense_features = list(dense_2_dim.keys())
var_features = list(var_2_dim.keys())
print('sparse_features: ', sparse_features)
print('dense_features: ', dense_features)
print('var_features: ', var_features)
data[sparse_features] = data[sparse_features].fillna(0)
data[dense_features] = data[dense_features].fillna(0)
# 1.Label Encoding for sparse features,and do simple Transformation for dense features
for feat in sparse_features:
lbe = LabelEncoder()
data[feat] = lbe.fit_transform(data[feat])
# mms = MinMaxScaler(feature_range=(0, 1))
# data[dense_features] = mms.fit_transform(data[dense_features])
# 2.count #unique features for each sparse field,and record dense feature field name
varlen_feature_columns = [VarLenSparseFeat(SparseFeat(feat, vocabulary_size=var_2_dim[feat]['vocab_size'], embedding_dim=var_2_dim[feat]['dim']), maxlen=5, combiner='sum') for feat in var_features]
fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique(), sparse_2_dim[feat]) for feat in sparse_features] + [DenseFeat(feat, dense_2_dim[feat]) for feat in dense_features]
dnn_feature_columns = fixlen_feature_columns + varlen_feature_columns
linear_feature_columns = fixlen_feature_columns + varlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# 3.generate input data for model
train, test = data.iloc[:train.shape[0]].reset_index(drop=True), data.iloc[train.shape[0]:].reset_index(drop=True)
if mode == 'offline':
train_idxes, eval_idxes = train['date_'] != 14, train['date_'] == 14
train, eval = train[train_idxes].drop(['date_'], axis=1), train[eval_idxes].drop(['date_'], axis=1)
if mode == 'online':
train = train.drop(['date_'], axis=1)
eval = train.head() # fake
test = test.drop(['date_'], axis=1)
train_x = {name: train[name] for name in feature_names}
eval_x = {name: eval[name] for name in feature_names}
test_x = {name: test[name] for name in feature_names}
# 4.Define Model,train,predict and evaluate
model = MyDeepFM(
linear_feature_columns=linear_feature_columns,
dnn_feature_columns=dnn_feature_columns,
task='binary', l2_reg_embedding=1e-1, device='cuda:0' if torch.cuda.is_available() else 'cpu', seed=SEED)
model.compile("adagrad", "binary_crossentropy", metrics=["binary_crossentropy", "auc"])
act_logs = model.fit(train_x, train[[action]].values, val_data=(eval_x, eval[[action]].values), batch_size=512, epochs=2, mode=mode)
logs[action] = act_logs
# online
submit[action] = model.predict(test_x, 128)
torch.cuda.empty_cache()
# weighted uAUC
if mode == 'offline':
score_dict = {}
for act in logs:
act_logs = logs[act]
score_dict[act] = act_logs[max(act_logs.keys())]['val_uAUC']
weight_dict = {"read_comment": 4.0, "like": 3.0, "click_avatar": 2.0, "forward": 1.0, "favorite": 1.0, "comment": 1.0, "follow": 1.0}
weighted_uAUC = compute_weighted_score(score_dict, weight_dict)
print(score_dict)
print('weighted_uAUC: ', weighted_uAUC)
# online
submit.to_csv("./submit_2_45.csv", index=False)
# TODO: try a different number of epochs for each action; experiment with the random seed
# scratch snippet kept from the notebook:
# int(k[1:-1].strip().split(',')[1])
p = data['manual_tag_list'].apply(lambda row: np.array([int(x) for x in row.split()]))
p[0].dtype
# baseline
{'read_comment': 0.6102415130979689, 'like': 0.6055234369612766, 'click_avatar': 0.7059927976309249, 'forward': 0.6832353813536607}
weighted_uAUC: 0.635276
# dnn_dropout = 0.1
{'read_comment': 0.6094100217906185, 'like': 0.6052801328988395, 'click_avatar': 0.7059140934189055, 'forward': 0.6846734262464789}
weighted_uAUC: 0.634998
# 256, 128, 128
{'read_comment': 0.613116787160124, 'like': 0.6062583852548347, 'click_avatar': 0.7058735217580193, 'forward': 0.6769030704770939}
weighted_uAUC: 0.635989
# epoch = 2
{'read_comment': 0.6117841889858322, 'like': 0.6089919743022709, 'click_avatar': 0.7138421964649098, 'forward': 0.6829949302549756}
weighted_uAUC: 0.638479
# sparse dim = 8, epoch = 2 (new baseline)
{'read_comment': 0.6126884118803656, 'like': 0.6078158393185238, 'click_avatar': 0.7141126528216767, 'forward': 0.6923154125787877}
weighted_uAUC: 0.639474
# removed the normalization of videoplayseconds (new baseline)
{'read_comment': 0.6150373746448982, 'like': 0.6087792274162345, 'click_avatar': 0.7137088800810096, 'forward': 0.6919173648006157}
weighted_uAUC: 0.640582
# add feed embedding 32(new baseline)
{'read_comment': 0.6231230935993682, 'like': 0.6162679088683002, 'click_avatar': 0.7128391281987229, 'forward': 0.6951917541544708}
weighted_uAUC: 0.646217
# add feed embedding 64
{'read_comment': 0.6179610910963779, 'like': 0.617180918593666, 'click_avatar': 0.7121687727167492, 'forward': 0.6969728833664359}
weighted_uAUC: 0.64447
# sparse dim = 12
{'read_comment': 0.6152862366363533, 'like': 0.6172504324924313, 'click_avatar': 0.7100718453099804, 'forward': 0.701999472669805}
weighted_uAUC: 0.643504
# repeat run of the baseline, online score 0.656674
{'read_comment': 0.6220072250372239, 'like': 0.6181791275945606, 'click_avatar': 0.7129768375663601, 'forward': 0.6987107057431032}
weighted_uAUC: 0.646723
# (256, 128, 64)
{'read_comment': 0.6176483873668901, 'like': 0.6170515088013665, 'click_avatar': 0.713929279701119, 'forward': 0.6961728898267605}
weighted_uAUC: 0.644578
# dnn_use_bn = True (new baseline), online 0.65576
{'read_comment': 0.6269150887735059, 'like': 0.6245276506750953, 'click_avatar': 0.715901103365852, 'forward': 0.7038550482328185}
weighted_uAUC: 0.65169
# dropout = 0.1
{'read_comment': 0.6261901395330384, 'like': 0.6239817428964435, 'click_avatar': 0.7163355996839406, 'forward': 0.6963938852580808}
weighted_uAUC: 0.650577
# repeat run of the baseline
{'read_comment': 0.6296090935187716, 'like': 0.621418106767897, 'click_avatar': 0.7168717294762987, 'forward': 0.6967281458462133}
weighted_uAUC: 0.651316
# first reduce the ue (universal embedding) dim with a linear layer (32->16), then feed it into the dnn
{'read_comment': 0.6227016241604177, 'like': 0.6199456756568217, 'click_avatar': 0.7143073328988507, 'forward': 0.6791432406166807}
weighted_uAUC: 0.64584
# ue(32->4) dnn_use_bn = True
{'read_comment': 0.6214967487996874, 'like': 0.6117349933619828, 'click_avatar': 0.7123781829253133, 'forward': 0.6907462327570015}
weighted_uAUC: 0.643669
# ue(32->8) dnn_use_bn = True
{'read_comment': 0.6255372000774586, 'like': 0.6099148843168334, 'click_avatar': 0.7147080055545442, 'forward': 0.6913280305289646}
weighted_uAUC: 0.645264
# ue(32->16) dnn_use_bn = True
{'read_comment': 0.6230882216710302, 'like': 0.620136770671566, 'click_avatar': 0.716609921279133, 'forward': 0.6855595234090964}
weighted_uAUC: 0.647154
# ue(32->32) dnn_use_bn = True
{'read_comment': 0.6244980658541014, 'like': 0.6178982426111442, 'click_avatar': 0.7149869209063016, 'forward': 0.7032611484183776}
weighted_uAUC: 0.648492
# ue(32) dnn_use_bn = True * 2
{'read_comment': 0.6268910782755379, 'like': 0.6222017679020581, 'click_avatar': 0.7150488812479852, 'forward': 0.6991933553474539}
weighted_uAUC: 0.650346
# ue(32) dnn_use_bn = True, rerun on a different GPU
{'read_comment': 0.6244744632801036, 'like': 0.6220399203865046, 'click_avatar': 0.7144416351024233, 'forward': 0.6968944127096272}
weighted_uAUC: 0.64898
# ue(32) dnn_use_bn = True, rerun on a different GPU
{'read_comment': 0.6275061489685031, 'like': 0.6229652888075203, 'click_avatar': 0.7143199406719899, 'forward': 0.6981564617860107}
weighted_uAUC: 0.650572
# ue(32) dnn_use_bn = False
{'read_comment': 0.621629670929673, 'like': 0.6172394906951797, 'click_avatar': 0.7138702099995802, 'forward': 0.6971143776259561}
weighted_uAUC: 0.646309
# ue(32->32) dnn_use_bn = False
{'read_comment': 0.6185276618440073, 'like': 0.6170842463943023, 'click_avatar': 0.7135048353197133, 'forward': 0.6971910507462337}
weighted_uAUC: 0.644956
# sampling ratios 4 4 4 10 (new baseline)
{'read_comment': 0.6313212962172534, 'like': 0.6220075992585066, 'click_avatar': 0.7143381748417214, 'forward': 0.6978311251747286}
weighted_uAUC: 0.651782
# + device
{'read_comment': 0.6244486461941755, 'like': 0.6263065400087143, 'click_avatar': 0.7067274451536654, 'forward': 0.7078156246005303}
weighted_uAUC: 0.649798
{'read_comment': 0.6239386227276458, 'like': 0.6246523621908081, 'click_avatar': 0.7127589873646121, 'forward': 0.7021687370818925}
weighted_uAUC: 0.64974
# baseline
{'read_comment': 0.6288471047258013, 'like': 0.6219350043589409, 'click_avatar': 0.7146902582599014, 'forward': 0.6918359172388889}
weighted_uAUC: 0.650241
# seed = 80 split_seed = 42
{'read_comment': 0.6242634094879379, 'like': 0.6264243440165063, 'click_avatar': 0.7118413185387293, 'forward': 0.6945659928938664}
weighted_uAUC: 0.649458
# seed = 80
{'read_comment': 0.6204573895103794, 'like': 0.62392994828236, 'click_avatar': 0.7235837537540227, 'forward': 0.6789320461307407}
weighted_uAUC: 0.647972
# seed = 81
{'read_comment': 0.6224667184141309, 'like': 0.6232470714913648, 'click_avatar': 0.710962678982845, 'forward': 0.6922917821851972}
weighted_uAUC: 0.647383
# seed 41 42 43 44 45 avg online
{'read_comment': 0.644098, 'like': 0.63073, 'click_avatar': 0.733325, 'forward': 0.697216}
weighted_uAUC: 0.663245
# seed = 41 ; with manual_tags_dim = 8
{'read_comment': 0.6248747741666473, 'like': 0.6202584290770113, 'click_avatar': 0.7195684676287956, 'forward': 0.695015289241745}
weighted_uAUC: 0.649443
### switched GPU ###
# seed = 42 ; with manual_tags_dim = 8
{'read_comment': 0.6326313492611053, 'like': 0.6238611589799947, 'click_avatar': 0.7190238030791103, 'forward': 0.7043097908858788}
weighted_uAUC: 0.654447
# online scores (seed 42)
{'read_comment': 0.636948, 'like': 0.623838, 'click_avatar': 0.727661, 'forward': 0.693742}
weighted_uAUC: 0.656837
# online scores, average of seeds 41 42 43 44 45
{'read_comment': 0.646524, 'like': 0.629584, 'click_avatar': 0.733393, 'forward': 0.698964}
weighted_uAUC: 0.66406
# seed = 42 ; w/o manual_tags_dim = 8
{'read_comment': 0.6227764240658794, 'like': 0.6238680596273618, 'click_avatar': 0.7145209265367466, 'forward': 0.7000066937625322}
weighted_uAUC: 0.649176
data.head()
USE_FEAT
```
| github_jupyter |
```
import numpy
import sys
import nmslib
import time
import math
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import train_test_split
# Just read the data
all_data_matrix = numpy.loadtxt('../../sample_data/sift_10k.txt')
# Create a held-out query data set
(data_matrix, query_matrix) = train_test_split(all_data_matrix, test_size = 0.1)
print("# of queries %d, # of data points %d" % (query_matrix.shape[0], data_matrix.shape[0]) )
# Set index parameters
# These are the most important ones
M = 15
efC = 100
num_threads = 4
index_time_params = {'M': M, 'indexThreadQty': num_threads, 'efConstruction': efC, 'post' : 0,
'skip_optimized_index' : 1 # using non-optimized index!
}
# Number of neighbors
K=100
# Space name should correspond to the space name
# used for brute-force search
space_name='l2sqr_sift'
# Initialize the library, specify the space and the vector type, and add data points
# for SIFT data, we want DENSE_UINT8_VECTOR and distance type INT
index = nmslib.init(method='hnsw',
space=space_name,
data_type=nmslib.DataType.DENSE_UINT8_VECTOR,
dtype=nmslib.DistType.INT)
index.addDataPointBatch(data_matrix.astype(numpy.uint8))
# Create an index
start = time.time()
index.createIndex(index_time_params)
end = time.time()
print('Index-time parameters', index_time_params)
print('Indexing time = %f' % (end-start))
# Setting query-time parameters
efS = 100
query_time_params = {'efSearch': efS}
print('Setting query-time parameters', query_time_params)
index.setQueryTimeParams(query_time_params)
# Querying
query_qty = query_matrix.shape[0]
start = time.time()
nbrs = index.knnQueryBatch(query_matrix.astype(numpy.uint8), k = K, num_threads = num_threads)
end = time.time()
print('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %
(end-start, float(end-start)/query_qty, num_threads*float(end-start)/query_qty))
# Computing gold-standard data
print('Computing gold-standard data')
start = time.time()
sindx = NearestNeighbors(n_neighbors=K, metric='l2', algorithm='brute').fit(data_matrix)
end = time.time()
print('Brute-force preparation time %f' % (end - start))
start = time.time()
gs = sindx.kneighbors(query_matrix)
end = time.time()
print('brute-force kNN time total=%f (sec), per query=%f (sec)' %
(end-start, float(end-start)/query_qty) )
# Finally computing recall
recall=0.0
for i in range(0, query_qty):
correct_set = set(gs[1][i])
ret_set = set(nbrs[i][0])
recall = recall + float(len(correct_set.intersection(ret_set))) / len(correct_set)
recall = recall / query_qty
print('kNN recall %f' % recall)
# Save a meta index and the data
index.saveIndex('dense_index_nonoptim.bin', save_data=True)
# Re-initialize the library, specifying the space and the vector type.
newIndex = nmslib.init(method='hnsw',
space=space_name,
data_type=nmslib.DataType.DENSE_UINT8_VECTOR,
dtype=nmslib.DistType.INT)
# Re-load the index and re-run queries
newIndex.loadIndex('dense_index_nonoptim.bin', load_data=True)
# Setting query-time parameters and querying
print('Setting query-time parameters', query_time_params)
newIndex.setQueryTimeParams(query_time_params)
query_qty = query_matrix.shape[0]
start = time.time()
new_nbrs = newIndex.knnQueryBatch(query_matrix.astype(numpy.uint8), k = K, num_threads = num_threads)
end = time.time()
print('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %
(end-start, float(end-start)/query_qty, num_threads*float(end-start)/query_qty))
# Finally computing recall for the new result set
recall=0.0
for i in range(0, query_qty):
correct_set = set(gs[1][i])
ret_set = set(new_nbrs[i][0])
recall = recall + float(len(correct_set.intersection(ret_set))) / len(correct_set)
recall = recall / query_qty
print('kNN recall %f' % recall)
```
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive')
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
cd drive/"My Drive"/"Colab Notebooks"/master_project/evaluation
%%capture
!pip install krippendorff
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import seaborn as sns
import pickle
import random
from statistics import mode, StatisticsError, mean, stdev
import krippendorff
import numpy as np
from sklearn.metrics import cohen_kappa_score
import copy
import csv
from collections import Counter
import sys
from sklearn.metrics import confusion_matrix
sys.path.append('..')
from utilities import *
with open("../HAN/df_all.pkl", "rb") as handle:
df_all = pickle.load(handle)
def get_length_info(lst):
char_length = []
word_length = []
for item in lst:
char_length.append(len(item))
word_length.append(len(item.split()))
print(f"Avg. Length (char) = {round(mean(char_length), 2)} (SD={round(stdev(char_length), 2)})")
print(f"Avg. Length (word) = {round(mean(word_length), 2)} (SD={round(stdev(word_length), 2)})\n")
all_sentences = df_all.words
negative_sentences = df_all.words[df_all.categories==0]
positive_sentences = df_all.words[df_all.categories==1]
for lst in [all_sentences, negative_sentences, positive_sentences]:
get_length_info(lst)
char_length = []
word_length = []
for item in df_all.words:
char_length.append(len(item))
word_length.append(len(item.split()))
char_random = random.sample(char_length, 25000)
char_random_y = [Counter(char_random)[i] for i in char_random]
word_random = random.sample(word_length, 25000)
word_random_y = [Counter(word_random)[i] for i in word_random]
plot = sns.barplot(x = char_random, y = char_random_y)
for ind, label in enumerate(plot.get_xticklabels()):
if ind % 10 == 0: # every 10th label is kept
label.set_visible(True)
else:
label.set_visible(False)
# new_ticks = [i.get_text() for i in plot.get_xticklabels()]
# plt.xticks(range(0, len(new_ticks), 20), new_ticks[::20])
plt.title('Length (Characters) Distribution of Sentences [25k]')
plt.xlabel("Length (Characters)")
plt.ylabel("Frequency")
plt.savefig("length_char_dist" + '.png', figsize = (16, 9), dpi=150, bbox_inches="tight")
plt.show()
plt.close()
plot = sns.barplot(x = word_random, y = word_random_y)
# for ind, label in enumerate(plot.get_xticklabels()):
# if ind % 10 == 0: # every 10th label is kept
# label.set_visible(True)
# else:
# label.set_visible(False)
plt.title('Length (words) Distribution of Sentences [25k]')
plt.xlabel("Length (words)")
plt.ylabel("Frequency")
plt.savefig("length_word_dist" + '.png', figsize = (16, 9), dpi=150, bbox_inches="tight")
plt.show()
plt.close()
with open("df_evaluation.pickle", "rb") as handle:
df_evaluation = pickle.load(handle)
original = df_evaluation["OG_sentiment"].to_list()
generated = df_evaluation["GEN_sentiment"].to_list()
count = 0
count_0_to_1_correct, count_0_to_1_total = 0, 0
count_1_to_0_correct, count_1_to_0_total = 0, 0
for og, gen in zip(original, generated):
if og == 0:
count_0_to_1_total += 1
else:
count_1_to_0_total += 1
if og != gen:
count += 1
if og == 0:
count_0_to_1_correct += 1
else:
count_1_to_0_correct += 1
print(f"accuracy [all] = {round((count/len(original))*100, 2)}%")
print(f"accuracy [0 -> 1] = {round((count_0_to_1_correct/count_0_to_1_total)*100, 2)}%")
print(f"accuracy [1 -> 0]= {round((count_1_to_0_correct/count_1_to_0_total)*100, 2)}%")
from sklearn.metrics import classification_report
print(classification_report(original, generated))
# Accuracy human evaluation subset
pd.set_option('display.max_colwidth', -1) # show more of pandas dataframe
df_evaluation
with open("../sentence_generatedsentence_dict.pickle", "rb") as handle:
sentence_generatedsentence_dict = pickle.load(handle)
og_negative_sentences = [sent for sent in df_evaluation.OG_sentences[df_evaluation["OG_sentiment"] == 0].to_list() if len(sent.split()) <= 15]
og_positive_sentences = [sent for sent in df_evaluation.OG_sentences[df_evaluation["OG_sentiment"] == 1].to_list() if len(sent.split()) <= 15]
random.seed(42)
human_evaluation_og_sti = random.sample(og_negative_sentences, 50) + random.sample(og_positive_sentences, 50)
human_evaluation_gen_sti = [sentence_generatedsentence_dict[sent] for sent in human_evaluation_og_sti]
random.seed(4)
human_evaluation_og_nat = random.sample(og_negative_sentences, 50) + random.sample(og_positive_sentences, 50)
human_evaluation_gen_nat = [sentence_generatedsentence_dict[sent] for sent in human_evaluation_og_nat]
original_sentence = df_evaluation["OG_sentences"].to_list()
generated_sentence = df_evaluation["GEN_sentences"].to_list()
original_sentiment = df_evaluation["OG_sentiment"].to_list()
generated_sentiment = df_evaluation["GEN_sentiment"].to_list()
wrong_0_to_1, correct_0_to_1 = [], []
wrong_1_to_0, correct_1_to_0 = [], []
for og_sentence, gen_sentence, og_sentiment, gen_sentiment in zip(original_sentence, generated_sentence, original_sentiment, generated_sentiment):
if og_sentiment != gen_sentiment:
if og_sentiment == 0:
correct_0_to_1.append((og_sentence, gen_sentence))
else:
correct_1_to_0.append((og_sentence, gen_sentence))
else:
if og_sentiment == 0:
wrong_0_to_1.append((og_sentence, gen_sentence))
else:
wrong_1_to_0.append((og_sentence, gen_sentence))
# correct_1_to_0
# for i, j in correct_1_to_0[:10000]:
# i = " ".join(i.strip().split())
# j = " ".join(j.strip().split())
# if len(i) <= 100:
# print("",i,"\n",j, end="\n\n")
# 10 wrong 0 -> 1
wrong_0_to_1[:10]
for i, j in wrong_0_to_1[:10]:
print(i, "#", j)
# 10 correct 0 -> 1
correct_0_to_1[:10]
for i, j in correct_0_to_1[:10]:
print(i, "#", j)
# 10 wrong 1 -> 0
wrong_1_to_0[:10]
for i, j in wrong_1_to_0[:10]:
print(i, "#", j)
# 10 correct 0 -> 1
correct_1_to_0[:10]
for i, j in correct_1_to_0[:10]:
print(i, "#", j)
reverse_dict = {"negative": 0, "positive": 1, "neither": 2, "either": 2}  # "either" was a typo for "neither" in the survey, so it is also mapped to 2
```
## Style Transfer Intensity
```
# Style Transfer intensity
sti_responses = gc.open_by_url('https://docs.google.com/spreadsheets/d/1_B3ayl6-p3nRl3RUtTgcu7fGT2v3n6rg3CLrR4wTafQ/edit#gid=2064143541')
sti_response_sheet = sti_responses.sheet1
sti_reponse_data = sti_response_sheet.get_all_values()
# sti_reponse_data
sti_answer_dict = {}
for idx, row in enumerate(sti_reponse_data[1:]):
if row[1] != "":
sti_answer_dict[idx] = [(idx, reverse_dict[i]) for idx, i in enumerate(row[2:-1])]
# inter-annotator agreement
k_alpha = krippendorff.alpha([[i[1] for i in v] for k, v in sti_answer_dict.items()])
print("Krippendorffs' Alpha:")
print(round(k_alpha,4))
# inter-annotator agreement, ignoring neither cases
remove_indexes = []
for lst in [v for k, v in sti_answer_dict.items()]:
for idx, i in enumerate(lst):
if i[1] == 2:
remove_indexes.append(idx)
sti_answers_without_neither = copy.deepcopy([v for k, v in sti_answer_dict.items()])
for lst in sti_answers_without_neither:
for i in sorted(set(remove_indexes), reverse=True):
del lst[i]
print("\nKrippendorffs' Alpha (ignoring neither cases):")
print(f"Answers remaining: {len(sti_answers_without_neither[0])}%")
k_alpha = krippendorff.alpha([[j[1] for j in usr] for usr in sti_answers_without_neither])
print(round(k_alpha,4))
# amount neither
neither_percentage = 0
for k, v in sti_answer_dict.items():
v = [i[1] for i in v]
neither_percentage += Counter(v)[2]/len(v)
print(f"Average amount of neither selected: {round((neither_percentage/3)*100, 2)}%")
# For each sentence, take the most common answer across evaluators; if there is no unique mode, pick one of the given answers at random
final_sti_human_answers = []
for idx, i in enumerate(np.array([[i[1] for i in v] for k, v in sti_answer_dict.items()]).transpose()):
try:
final_sti_human_answers.append((idx, mode(i)))
except StatisticsError as e:
final_sti_human_answers.append((idx, random.choice(i)))
with open("df_evaluation.pickle", "rb") as handle:
df_evaluation = pickle.load(handle)
id_sentence_dict = {}
for idx, sentence in enumerate(sti_reponse_data[0][2:-1]):
id_sentence_dict[idx] = sentence
sentence_human_sentiment = {}
for sentence_id, sentiment in final_sti_human_answers:
if sentiment == 2:
continue
sentence_human_sentiment[id_sentence_dict[sentence_id]] = sentiment
human_sentiment = [v for k,v in sentence_human_sentiment.items()]
og_sentiment = []
for k, v in sentence_human_sentiment.items():
og_sentiment.append(df_evaluation.OG_sentiment[df_evaluation.GEN_sentences==k].item())
# Accuracy style transfer intensity for human classification
count = 0
count_0_to_1_correct, count_0_to_1_total = 0, 0
count_1_to_0_correct, count_1_to_0_total = 0, 0
for og, gen in zip(og_sentiment, human_sentiment):
if og == 0:
count_0_to_1_total += 1
else:
count_1_to_0_total += 1
if og != gen:
count += 1
if og == 0:
count_0_to_1_correct += 1
else:
count_1_to_0_correct += 1
print(f"accuracy [including neither] = {round((count/len(final_sti_human_answers))*100, 2)}%")
print(f"accuracy [excluding neither] = {round((count/len(og_sentiment))*100, 2)}%")
print(f"accuracy [0 -> 1] = {round((count_0_to_1_correct/count_0_to_1_total)*100, 2)}%")
print(f"accuracy [1 -> 0]= {round((count_1_to_0_correct/count_1_to_0_total)*100, 2)}%")
# Agreement between human and automatic evaluation
gen_sentiment = []
for k, v in sentence_human_sentiment.items():
gen_sentiment.append(df_evaluation.GEN_sentiment[df_evaluation.GEN_sentences==k].item())
k_alpha = krippendorff.alpha([gen_sentiment, human_sentiment])
print("\nKrippendorffs' Alpha:")
print(round(k_alpha,4))
# https://www.ncbi.nlm.nih.gov/pubmed/15883903 reference to cohen's kappa
print(f"Cohen's Kappa:\n{round(cohen_kappa_score(gen_sentiment, human_sentiment), 4)}")
cm = confusion_matrix(og_sentiment, human_sentiment)
create_confusion_matrix(cm, ["neg", "pos"], show_plots=True, title="Gold labels vs. Human Predictions",
xlabel="Human Labels", ylabel="Gold Labels", dir="", y_lim_value=2, save_plots=True)
cm = confusion_matrix(gen_sentiment, human_sentiment)
create_confusion_matrix(cm, ["neg", "pos"], show_plots=True, title="Automatic vs. Human Predictions",
xlabel="Human Labels", ylabel="Automatic Labels", dir="", y_lim_value=2, save_plots=True)
```

## Naturalness (Isolated)
```
# Naturalness (isolated)
nat_iso_responses = gc.open_by_url('https://docs.google.com/spreadsheets/d/1tEOalZErOjSOD8DGKfvi-edv8sKkGczLx0eYi7N6Kjw/edit#gid=1759015116')
nat_iso_response_sheet = nat_iso_responses.sheet1
nat_iso_reponse_data = nat_iso_response_sheet.get_all_values()
# nat_iso_reponse_data
nat_iso_answer_dict = {}
for idx, row in enumerate(nat_iso_reponse_data[1:]):
if row[1] != "":
nat_iso_answer_dict[idx] = [int(i) for i in row[2:-1]]
# inter-annotator agreement
print("Krippendorffs' Alpha:")
k_alpha = krippendorff.alpha([v for k,v in nat_iso_answer_dict.items()])
print(round(k_alpha,4))
# naturalness mean (isolated)
naturalness_mean_list = []
for idx, row in enumerate(nat_iso_reponse_data[1:]):
if row[1] != "":
naturalness_mean_list.append(int(i) for i in row[2:-1])
print("Mean of naturalness (isolated):")
print(round(mean([mean(i) for i in naturalness_mean_list]),4))
nat_all = []
for k, v in nat_iso_answer_dict.items():
nat_all += v
nat_all_dist = Counter(nat_all)
nat_all_dist
# naturalness (isolated) distribution
fig = plt.figure(figsize=[7, 5], dpi=100)
ax = fig.add_axes([0,0,1,1])
ax.bar(nat_all_dist.keys(), nat_all_dist.values())
plt.title("Naturalness (Isolated) distribution")
plt.xlabel("Answer")
plt.ylabel("Frequency")
plt.savefig("naturalness_isolated_dist" + '.png', figsize = (16, 9), dpi=150, bbox_inches="tight")
plt.show()
plt.close()
df_evaluation
id_sentiment_dict = {}
for idx, sentence in enumerate(nat_iso_reponse_data[0][2:-1]):
# GEN_sentiment
sentiment = df_evaluation.OG_sentiment[df_evaluation.GEN_sentences == sentence].item()
id_sentiment_dict[idx] = sentiment
nat_iso_answer_dict_div = {}
for idx, row in enumerate(nat_iso_reponse_data[1:]):
if row[1] != "":
nat_iso_answer_dict_div[idx] = ([int(i) for id, i in enumerate(row[2:-1]) if id_sentiment_dict[id] == 0],
[int(i) for id, i in enumerate(row[2:-1]) if id_sentiment_dict[id] == 1])
nat_all_neg, nat_all_pos = [], []
for k, (v_neg, v_pos) in nat_iso_answer_dict_div.items():
nat_all_neg += v_neg
nat_all_pos += v_pos
nat_all_dist_neg = Counter(nat_all_neg)
nat_all_dist_pos = Counter(nat_all_pos)
df = pd.DataFrame([['g1','c1',10],['g1','c2',12],['g1','c3',13],['g2','c1',8],
['g2','c2',10],['g2','c3',12]],columns=['group','column','val'])
df = pd.DataFrame([nat_all_dist_neg, nat_all_dist_pos]).T
ax = df.plot(kind='bar')
ax.figure.set_size_inches(16, 9)
plt.title("Naturalness (Isolated) distribution")
plt.xlabel("Answer")
plt.ylabel("Frequency")
plt.xticks(rotation='horizontal')
ax.figure.savefig("naturalness_isolated_dist_div" + '.png', figsize = (16, 9), dpi=150, bbox_inches="tight")
plt.legend(["Negative", "Positive"])
plt.show()
plt.close()
```
## Naturalness (Comparison)
```
# Naturalness (comparison)
nat_comp_responses = gc.open_by_url('https://docs.google.com/spreadsheets/d/1mFtsNNaJXDK2dT9LkLz_r8LSfIOPskDqn4jBamE-bns/edit#gid=890219669')
nat_comp_response_sheet = nat_comp_responses.sheet1
nat_comp_reponse_data = nat_comp_response_sheet.get_all_values()
# nat_comp_reponse_data
nat_comp_answer_dict = {}
for idx, row in enumerate(nat_comp_reponse_data[1:]):
if row[1] != "":
nat_comp_answer_dict[idx] = [int(i) for i in row[2:-1]]
# inter-annotator agreement
print("Krippendorffs' Alpha:")
k_alpha = krippendorff.alpha([v for k,v in nat_comp_answer_dict.items()])
print(round(k_alpha,4))
# naturalness mean (comparison)
naturalness_mean_list = []
for idx, row in enumerate(nat_comp_reponse_data[1:]):
if row[1] != "":
naturalness_mean_list.append(int(i) for i in row[2:-1])
print("Mean of naturalness (comparison):")
print(round(mean([mean(i) for i in naturalness_mean_list]),4))
nat_comp_questions = gc.open_by_url('https://docs.google.com/spreadsheets/d/1uxAGaOvJcb-Cg3wjTDEovTgR--TFZet0VnpzInljjfo/edit#gid=167268481')
nat_comp_questions_sheet = nat_comp_questions.sheet1
nat_comp_questions_data = nat_comp_questions_sheet.get_all_values()
# naturalness (og vs. gen naturalness)
# 1: A is far more natural than B
# 2: A is slightly more natural than B
# 3: A and B are equally natural
# 4: B is slightly more natural than A
# 5 : B is far more natural than A
# 1: OG is far more natural than GEN
# 2: OG is slightly more natural than GEN
# 3: OG and GEN are equally natural
# 4: GEN is slightly more natural than OG
# 5: GEN is far more natural than OG
one, two, three, four, five = 0, 0, 0, 0, 0
for idx, row in enumerate(nat_comp_reponse_data[1:]):
if row[1] != "":
for idx2, (row, answer) in enumerate(zip(nat_comp_questions_data[1:], row[2:-1])):
original, generated = row[-2:]
answer = int(answer)
# print("A", "B", "|", original, generated, "|", answer)
if original == "A":
if answer == 1:
one += 1
if answer == 2:
two += 1
if answer == 3:
three += 1
if answer == 4:
four += 1
if answer == 5:
five += 1
if original == "B":
if answer == 1:
five += 1
if answer == 2:
four += 1
if answer == 3:
three += 1
if answer == 4:
two += 1
if answer == 5:
one += 1
print(one,two,three,four,five)
print("Mean of naturalness (comparison) original vs. generated:")
print(round((one*1+two*2+three*3+four*4+five*5)/sum([one,two,three,four,five]),4))
# naturalness (comparison) distribution
fig = plt.figure(figsize=[7, 5], dpi=100)
answers = {'OG is far more natural than GEN ':'red',
'OG is slightly more natural than GEN':'green',
'OG and GEN are equally natural':'blue',
'GEN is slightly more natural than OG':'orange',
'GEN is far more natural than OG': 'purple'}
labels = list(answers.keys())
handles = [plt.Rectangle((0,0),1,1, color=answers[label]) for label in labels]
ax = fig.add_axes([0,0,1,1])
plt.bar([1,2,3,4,5], [one,two,three,four,five], color=answers.values())
plt.title("Naturalness (Comparison) distribution [translated]")
plt.legend(handles, labels)
plt.xlabel("Answer")
plt.ylabel("Frequency")
plt.savefig("naturalness_comparison_dist_translated" + '.png', figsize = (16, 9), dpi=150, bbox_inches="tight")
plt.show()
plt.close()
nat_all = []
for k, v in nat_comp_answer_dict.items():
nat_all += v
nat_all_dist = Counter(nat_all)
nat_all_dist
# naturalness (comparison) distribution
fig = plt.figure(figsize=[7, 5], dpi=100)
ax = fig.add_axes([0,0,1,1])
ax.bar(nat_all_dist.keys(), nat_all_dist.values())
plt.title("Naturalness (Comparison) distribution")
plt.xlabel("Answer")
plt.ylabel("Frequency")
plt.savefig("naturalness_comparison_dist" + '.png', figsize = (16, 9), dpi=150, bbox_inches="tight")
plt.show()
plt.close()
```
## Which Words
```
# Which words
ww_responses = gc.open_by_url('https://docs.google.com/spreadsheets/d/1bRoF5l8Lt9fqeOki_YrJffd2XwEpROKi1RUsbC1umIk/edit#gid=1233025762')
ww_response_sheet = ww_responses.sheet1
ww_reponse_data = ww_response_sheet.get_all_values()
ww_answer_dict = {}
for idx, row in enumerate(ww_reponse_data[1:]):
if row[1] != "":
ww_answer_dict[idx]= [[word.strip() for word in i.split(",")] for i in row[2:-1]]
# Human-annotator agreement
user1 = ww_answer_dict[0]
user2 = ww_answer_dict[1]
total = 0
for l1, l2 in zip(user1, user2):
total += len((set(l1) & set(l2)))/max(len(l1), len(l2))
print("Human Annotator Agreement, which word:")
print(f"{round((total/len(user1)*100), 2)}%")
# Human-annotator agreement (ignoring <NONE>)
user1 = ww_answer_dict[0]
user2 = ww_answer_dict[1]
total = 0
none = 0
for l1, l2 in zip(user1, user2):
if l1==['<NONE>'] or l2==['<NONE>']:
none+=1
continue
total += len((set(l1) & set(l2)))/max(len(l1), len(l2))
print("Human Annotator Agreement, which word:")
print(f"{round((total/(len(user1)-none)*100), 2)}%")
# Human-annotator agreement on <NONE>
user1 = ww_answer_dict[0]
user2 = ww_answer_dict[1]
none = 0
none_both = 0
for l1, l2 in zip(user1, user2):
if l1==['<NONE>'] or l2==['<NONE>']:
none+=1
if l1==l2:
none_both+=1
print("Human Annotator Agreement, <NONE>:")
print(f"{round((none_both/none)*100, 2)}%")
# Total number of words both human annotators agreed on
user1 = ww_answer_dict[0]
user2 = ww_answer_dict[1]
human_total_words_chosen = 0
for l1, l2 in zip(user1, user2):
human_total_words_chosen += len(set(l1) & set(l2))
with open("../to_substitute_dict.pickle", "rb") as handle:
to_substitute_dict = pickle.load(handle)
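# to_substitute_dict is assumed to map each original sentence to a list of
# (token_index, substitute) pairs chosen by the classifier (see its use below)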
id_sentence_dict = {}
for idx, sentence in enumerate(ww_reponse_data[0][2:-1]):
id_sentence_dict[idx] = sentence
cls_total_words_chosen = 0
total = 0
amount_none = 0
for l1, l2, (k, v) in zip(user1, user2, id_sentence_dict.items()):
    human_chosen_words = set(l1) & set(l2)
    classifier_chosen_words = {v.split()[idx] for idx, _ in to_substitute_dict[v]}
    cls_total_words_chosen += len(classifier_chosen_words)
    if human_chosen_words == {'<NONE>'}:
        amount_none += 1
        # do not count the classifier's words for sentences where the annotators agreed on <NONE>
        cls_total_words_chosen -= len(classifier_chosen_words)
    total += len((human_chosen_words & classifier_chosen_words))/max(len(human_chosen_words), len(classifier_chosen_words))
print("Classifier/Human Agreement, which word (counting none):")
print(f"{round((total/len(user1)*100), 2)}%")
print("\nClassifier/Human Agreement, which word (excluding none):")
print(f"{round((total/(len(user1)-amount_none)*100), 2)}%")
print(f"\nAmount of <NONE> chosen by all annotators:\n{round(len(user1)/amount_none, 2)}%")
print("\ntotal words chosen by Human Evaluators")
print(f"{human_total_words_chosen}")
print("total words chosen by Classifier")
print(f"{cls_total_words_chosen}")
# More example sentences, for better in-depth analysis
sentences_one, sentences_two, sentences_three, sentences_four, sentences_five = [], [], [], [], []
for idx, row in enumerate(nat_comp_reponse_data[1:]):
if row[1] != "":
for idx2, (row, answer) in enumerate(zip(nat_comp_questions_data[1:], row[2:-1])):
original, generated = row[-2:]
answer = int(answer)
if generated == "A":
generated_sentence = row[0].rsplit(":")[1].strip()
original_sentence = row[2].rsplit(":")[1].strip()
elif generated == "B":
generated_sentence = row[2].rsplit(":")[1].strip()
original_sentence = row[0].rsplit(":")[1].strip()
# print("A", "B", "|", original, generated, "|", answer)
if original == "A":
if answer == 1:
sentences_one.append(generated_sentence)
if answer == 2:
sentences_two.append(generated_sentence)
if answer == 3:
sentences_three.append(generated_sentence)
if answer == 4:
sentences_four.append(generated_sentence)
if answer == 5:
sentences_five.append(generated_sentence)
if original == "B":
if answer == 1:
sentences_five.append(generated_sentence)
if answer == 2:
sentences_four.append(generated_sentence)
if answer == 3:
sentences_three.append(generated_sentence)
if answer == 4:
sentences_two.append(generated_sentence)
if answer == 5:
sentences_one.append(generated_sentence)
print(len(sentences_one), len(sentences_two), len(sentences_three), len(sentences_four), len(sentences_five))
low_natural_sentences = sentences_one + sentences_two
high_natural_sentences = sentences_three + sentences_four + sentences_five
og_sentiment, gen_sentiment = [], []
for sentence in low_natural_sentences:
og_sentiment.append(df_evaluation.OG_sentiment[df_evaluation.GEN_sentences == sentence].item())
gen_sentiment.append(df_evaluation.GEN_sentiment[df_evaluation.GEN_sentences == sentence].item())
print("Accuracy Low Naturalness Sentences")
print(round((1-accuracy_score(og_sentiment, gen_sentiment))*100, 4))
og_sentiment, gen_sentiment = [], []
for sentence in high_natural_sentences:
og_sentiment.append(df_evaluation.OG_sentiment[df_evaluation.GEN_sentences == sentence].item())
gen_sentiment.append(df_evaluation.GEN_sentiment[df_evaluation.GEN_sentences == sentence].item())
print("\nAccuracy High Naturalness Sentences")
print(round((1-accuracy_score(og_sentiment, gen_sentiment))*100, 4))
length = []
for sentence in low_natural_sentences:
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
length.append(len(to_substitute_dict[og_sentence]))
print("Avg. amount of words substituted Low Naturalness Sentences")
print(round(mean(length), 2))
length = []
for sentence in high_natural_sentences:
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
length.append(len(to_substitute_dict[og_sentence]))
print("\nAvg. amount of words substituted High Naturalness Sentences")
print(round(mean(length), 2))
print("Examples of generated sentence more natural than source sentence\n")
for sentence in sentences_five+sentences_four:
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
print(f"OG = {og_sentence}\nGEN = {sentence}\n")
print("Examples of generated sentence as natural as source sentence\n")
for idx, sentence in enumerate(sentences_three):
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
print(f"OG = {og_sentence}\nGEN = {sentence}\n")
if idx == 10:
break
user_answers = []
for idx, row in enumerate(nat_iso_reponse_data[1:]):
if row[1] != "":
answers = [int(i) for i in row[2:-1]]
user_answers.append(answers)
highly_natural_sentences = [] # average naturalness >= 4
highly_unnatural_sentences = [] # average naturalness <= 2
for idx, sentence in enumerate(nat_iso_reponse_data[0][2:-1]):
answers = []
for user in user_answers:
answers.append(user[idx])
if mean(answers) >= 4:
highly_natural_sentences.append(sentence)
elif mean(answers) <= 2:
highly_unnatural_sentences.append(sentence)
print(len(highly_natural_sentences), len(highly_unnatural_sentences))
print("Examples of highly natural sentences\n")
for sentence in highly_natural_sentences:
print(sentence)
print("\nExamples of highly unnatural sentences\n")
for sentence in highly_unnatural_sentences:
print(sentence)
int_to_string_dict = {0: "negative", 1: "positive"}
user_answers = []
for idx, row in enumerate(sti_reponse_data[1:]):
if row[1] != "":
answers = [i for i in row[2:-1]]
user_answers.append(answers)
all_neither_sentences = []
all_negative_sentences = []
all_positive_sentences = []
human_cls_agree_transfer = []
human_cls_agree_no_transfer = []
human_yes_cls_no = []
human_no_cls_yes = []
for idx, sentence in enumerate(sti_reponse_data[0][2:-1]):
answers = []
for user in user_answers:
answers.append(user[idx])
if set(answers) == {'neither'}:
all_neither_sentences.append(sentence)
if set(answers) == {'negative'}:
all_negative_sentences.append(sentence)
if set(answers) == {'positive'}:
all_positive_sentences.append(sentence)
try:
human_sentiment = mode(answers)
except StatisticsError as e:
human_sentiment = random.choice(answers)
cls_sentiment = int_to_string_dict[df_evaluation.GEN_sentiment[df_evaluation.GEN_sentences == sentence].item()]
og_sentiment = int_to_string_dict[df_evaluation.OG_sentiment[df_evaluation.GEN_sentences == sentence].item()]
union = set([human_sentiment])|set([cls_sentiment])
if (len(union) == 1) and ({og_sentiment} != union):
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
human_cls_agree_transfer.append((og_sentence, sentence))
if (len(union) == 1) and ({og_sentiment} == union):
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
human_cls_agree_no_transfer.append((og_sentence, sentence))
    if (human_sentiment != og_sentiment) and (cls_sentiment == og_sentiment):
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
human_yes_cls_no.append((og_sentence, sentence))
    if (human_sentiment == og_sentiment) and (cls_sentiment != og_sentiment):
og_sentence = df_evaluation.OG_sentences[df_evaluation.GEN_sentences == sentence].item()
human_no_cls_yes.append((og_sentence, sentence))
threshold = 20
print("Examples of sentences that were classified as neither by all evaluators")
print("-"*40, f"[{len(all_neither_sentences)}]", "-"*40)
for sentence in all_neither_sentences[:threshold]:
print(sentence)
print("\nExamples of sentences that were classified as negative by all evaluators")
print("-"*40, f"[{len(all_negative_sentences)}]", "-"*40)
for sentence in all_negative_sentences[:threshold]:
print(sentence)
print("\nExamples of sentences that were classified as positive by all evaluators")
print("-"*40, f"[{len(all_positive_sentences)}]", "-"*40)
for sentence in all_positive_sentences[:threshold]:
print(sentence)
print("\nClassification examples where both human + cls agree style is transferred")
print("-"*40, f"[{len(human_cls_agree_transfer)}]", "-"*40)
for og_sentence, gen_sentence in human_cls_agree_transfer[:threshold]:
print(f"{og_sentence}\n{gen_sentence}\n")
print("\nClassification examples where human says style is transferred, but cls not")
print("-"*40, f"[{len(human_yes_cls_no)}]", "-"*40)
for og_sentence, gen_sentence in human_yes_cls_no[:threshold]:
print(f"{og_sentence}\n{gen_sentence}\n")
print("\nClassification examples where cls says style is transferred, but human not")
print("-"*40, f"[{len(human_no_cls_yes)}]", "-"*40)
for og_sentence, gen_sentence in human_no_cls_yes[:threshold]:
print(f"{og_sentence}\n{gen_sentence}\n")
print("\nClassification examples where both human + cls agree style is not transferred")
print("-"*40, f"[{len(human_cls_agree_no_transfer)}]", "-"*40)
for og_sentence, gen_sentence in human_cls_agree_no_transfer[:threshold]:
print(f"{og_sentence}\n{gen_sentence}\n")
```
| github_jupyter |
```
class Solution:
def removeInvalidParentheses(self, s: str):
if not s: return []
self.max_len = self.get_max_len(s)
self.ans = []
self.dfs(s, 0, "", 0)
return self.ans
def dfs(self, s, idx, cur_str, count):
if len(cur_str) > self.max_len: return
        if count < 0: return  # count is the number of unmatched "("; negative means invalid
        if idx == len(s):  # we have reached the end of s
if count == 0 and len(cur_str) == self.max_len:
self.ans.append(cur_str)
return
        # any other character can be appended directly without affecting validity
if s[idx] != '(' and s[idx] != ')':
self.dfs(s, idx+1, cur_str+s[idx], count)
else:
val = 1 if s[idx] == '(' else -1
            # always try keeping the current parenthesis; it may be the same as or
            # different from the last character of cur_str
self.dfs(s, idx+1, cur_str+s[idx], count+val)
if not cur_str or s[idx] != cur_str[-1]:
                # only when it differs from the last kept character may we skip it (this avoids duplicate results)
self.dfs(s, idx+1, cur_str, count)
def get_max_len(self, s):
"""返回原始字符串是 valid 的最大长度"""
l_count, res = 0, 0
for a in s:
if a == '(':
l_count += 1
elif a == ')':
if l_count == 0:
res += 1
else:
l_count -= 1
return len(s) - l_count - res
class Solution:
def removeInvalidParentheses(self, s: str):
if not s: return [""]
self.max_len = self.get_max_len(s)
self.ans = []
self.dfs(s, 0, "", 0)
return self.ans
def dfs(self, s, idx, cur_str, count):
        # count is the number of unmatched "("; if it goes below 0 the string is invalid
if len(cur_str) > self.max_len: return
if count < 0: return
        if idx == len(s):  # we have reached the end of s
if count == 0 and len(cur_str) == self.max_len:
self.ans.append(cur_str)
return
        # non-parenthesis characters
if s[idx] != '(' and s[idx] != ')':
self.dfs(s, idx+1, cur_str+s[idx], count)
else:
val = 1 if s[idx] == '(' else -1
self.dfs(s, idx+1, cur_str+s[idx], count+val)
if not cur_str or s[idx] != cur_str[-1]:
self.dfs(s, idx+1, cur_str, count)
def get_max_len(self, s):
l_count, res = 0, 0
for a in s:
if a == '(':
l_count += 1
elif a == ')':
if l_count == 0:
res += 1
else:
l_count -= 1
return len(s) - l_count - res
solution = Solution()
solution.removeInvalidParentheses("(a)())()")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D1_DeepLearning/W2D1_Outro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <a href="https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D1_DeepLearning/W2D1_Outro.ipynb" target="_parent"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open in Kaggle"/></a>
# Outro
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
## Video 1
```
# @markdown
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1M54y1B7hs", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="pzA1GpxodnM", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
## Video 2
```
# @markdown
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1GT4y1j7aQ", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"nWlgIclpyt4", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
## Daily survey
Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is
a small delay before you will be redirected to the survey.
<a href="https://portal.neuromatchacademy.org/api/redirect/to/519adc7c-d4a4-4a75-8ae9-31cccb1e1f5a"><img src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/static/button.png?raw=1" alt="button link to survey" style="width:410px"></a>
## Slides
```
# @markdown
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/z5g93/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
```
| github_jupyter |
# Suicide Analysis in India
In this notebook we will try to understand the different reasons why people committed suicide in India (using the dataset "Suicides in India"). Almost 11,89,068 people committed suicide in 2012 alone, so it is important to understand why people commit suicide and to try to mitigate the causes.
```
# import lib
import numpy as np #for math operations
import pandas as pd#for data manipulation
import plotly.express as px#for better visualization
import plotly.io as pio
# read dataset
data = pd.read_csv('../input/suicides-in-india/Suicides in India 2001-2012.csv')
data.tail(10)
```
# Dataset Information
```
data.info()
```
# Check Missing & Null Values
```
data.isna().sum()
```
# People who committed suicide from 2001-2012
```
print("Total cases from 2001-12: \n",data.groupby("Year")["Total"].sum())
data.groupby("Year")["Total"].sum().plot(kind="line",marker="o",title="People Commited Suicide From 2001-2012")
```
# States Present Inside Dataset
This step merges states with the same name and removes redundancy.
```
data["State"].value_counts()
```
Remove rows whose State value is Total (States), Total (All India), or Total (Uts)
```
data = data[(data["State"]!="Total (States)")&(data["State"]!="Total (Uts)")&(data["State"]!="Total (All India)") ]
```
# Which Gender has the Highest Number of Suicides?
Males commit more suicides in comparison to females.
```
filter_gender = pd.DataFrame(data.groupby("Gender")["Total"].sum()).reset_index()
px.bar(filter_gender,x="Gender", y="Total",color="Gender")
```
# States with Higher Suicide cases
1. Maharashtra<br>
2. West Bengal<br>
3. Tamil Nadu<br>
4. Andhra Pradesh<br>
```
pio.templates.default = "plotly_dark"
filter_state = pd.DataFrame(data.groupby(["State"])["Total"].sum()).reset_index()
px.bar(filter_state,x = 'State', y = 'Total',color="State")
```
# Number of cases changing over time
The changing rate of suicides over time
```
grouped_year = data.groupby(["Year","Gender"])["Total"].sum()
grouped_year = pd.DataFrame(grouped_year).reset_index()
# grouped_year
px.line(grouped_year,x="Year", y="Total", color="Gender")
```
# Number of cases based on the reasons they committed suicide
```
filter_type_code = pd.DataFrame(data.groupby(["Type_code","Year"])["Total"].sum()).reset_index()
filter_type_code
px.bar(filter_type_code,x="Type_code", y="Total",color="Year")
```
# Which social issues cause more suicides?
It is clear that **married people** commit more suicides.<br>
This makes sense because marital issues may cause conflict between the couple, and as a result they might be more prone to committing suicide.
```
filter_social_status = pd.DataFrame(data[data["Type_code"]=="Social_Status"].groupby(["Type","Gender"])["Total"].sum()).reset_index()
px.bar(filter_social_status,x="Type", y="Total",color="Gender")
```
# Education status of people who committed suicides
People with low education commit more suicides.<br>
People with a diploma or a graduate degree tend to commit the fewest suicides.
```
filter_social_status = pd.DataFrame(data[data["Type_code"]=="Education_Status"].groupby(["Type","Gender"])["Total"].sum()).reset_index()
fig = px.bar(filter_social_status,x="Type", y="Total",color="Gender")
fig.show(rotation=90)
```
# Profession of the people who committed suicides
**Farmers** and **housewives** have committed more suicides compared to others.
This makes sense because most Indian farmers have debt and their livelihood depends on the yield of their crops; if the yield is not good, they will not be able to clear their debt, and in the worst case they might commit suicide.
> Global warming, monsoon delay, drought etc can lead to bad yield.
Housewives might have issues in their marriage, which might be a reason for such a high number of cases.
> Domestic violence, dowry, gender discrimination, etc might be some of the reasons for housewives to commit suicide.
```
filter_social_status = pd.DataFrame(data[data["Type_code"]=="Professional_Profile"].groupby(["Type","Gender"])["Total"].sum()).reset_index()
fig2 = px.bar(filter_social_status,x="Type", y="Total",color="Gender")
fig2.show(rotation=90)
```
# Which age group has committed the most suicides?
From the visualization below, it is clear that youngsters (age 15-29) and middle-aged people (age 30-44) commit the maximum number of suicides.
It can be due to several reasons like:
* unemployment
* academic stress
* bad friend circle
* farmers (since they have to be young and strong enough to do farming)
* addictions
```
# age group 0-100+ encapsulates all the remaining age groups, hence it would make sense to drop it
import matplotlib.pyplot as plt #for visualization
import seaborn as sns
%matplotlib inline
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.set_palette("BrBG")
filter_age = data[data["Age_group"]!="0-100+"]
sns.catplot(x="Age_group", y="Total", kind="bar", data=filter_age,height=8.27, aspect=11.7/8.27);
```
# Conclusion
* Males tend to commit more suicides compared to females in India
* The highest numbers of suicide cases occur in Maharashtra, West Bengal, Tamil Nadu, and Andhra Pradesh.
* Males might continue to commit more suicides than females in the future if this trend continues.
* People who commit suicide are mostly:
* Married
* Farmers and housewives
* Youngsters (15-29 age) and middle age (30-44)
| github_jupyter |
# Tutorial 0a: Setting Up Python For Scientific Computing
In this tutorial, we will set up a scientific Python computing environment using the [Anaconda python distribution by Continuum Analytics](https://www.continuum.io/downloads).
## Why Python?
- Beginner friendly
- Versatile and flexible
- Most mature package libraries around
- Most popular in the Machine Learning world
As is true in human language, there are [hundreds of computer programming languages](https://en.wikipedia.org/wiki/List_of_programming_languages). While each has its own merit, the major languages for scientific computing are C, C++, R, MATLAB, Python, Java, Julia, and Fortran. [MATLAB](https://www.mathworks.com), [Julia](https://julialang.org/), and [Python](https://www.python.org) are similar in syntax and typically read as if they were written in plain english. This makes these languages a useful tool for teaching, but they are also very powerful languages and are **very** actively used in real-life research. MATLAB is proprietary while Python is open source. A benefit of being open source is that anyone can write and release Python packages. For science, there are many wonderful community-driven packages such as [NumPy](http://www.numpy.org), [SciPy](http://www.scipy.org), [scikit-image](http://scikit-image.org), and [Pandas](http://pandas.pydata.org) just to name a few.
## Installing Python 3 with Anaconda
### Python 3 vs Python 2
There are two dominant versions of Python (available through the Anaconda distribution) used for scientific computing, Python 2.7 and Python 3. We are at an interesting crossroads between these two versions. Python 3 is not backwards compatible with Python 2. While there are still some packages written for Python 2.7 that have not been modified for compatibility with Python 3, a large number have transitioned, and Python 2.7 is no longer supported as of January 1, 2020. As this is the future for scientific computing with Python, we will use Python 3 for these tutorials.
### Anaconda
There are several scientific Python distributions available for MacOS, Windows, and Linux. The two most popular, [Enthought Canopy](https://www.enthought.com/products/canopy/) and [Anaconda](https://www.continuum.io/why-anaconda) are specifically designed for scientific computing and data science work. For this course, we will use the Anaconda Python 3.7 distribution. To install the correct version, follow the instructions below.
1. Navigate to [the Anaconda download page](https://www.continuum.io/downloads) and download the Python 3.7 graphical installer.
2. Launch the installer and follow the onscreen instructions.
Congratulations! You now have the beginnings of a scientific Python distribution.
### Using JupyterLab as a Scientific Development Environment
Packaged with the Anaconda Python distribution is the [Jupyter project](https://jupyter.org/). This environment is incredibly useful for interactive programming and development and is widely used across scientific computing. Jupyter allows for interactive programming in a large array of programming languages including Julia, R, and MATLAB. As you've guessed by this point, we will be focusing on using Python through the Jupyter Environment.
The key component of the Jupyter interactive programming environment is the [Jupyter Notebook](https://jupyter.org/). This acts like an interactive script which allows one to interweave code, mathematics, and text to create a complete narrative around your computational project. In fact, you are reading a Jupyter Notebook right now!
While Jupyter Notebooks are fantastic alone, we will be using them throughout the course via the [JupyterLab Integrated Development Environment (IDE)](https://jupyter.org/). JupyterLab allows one to write code in notebooks, navigate around your file system, write isolated python scripts, and even access a UNIX terminal, all of which we will do throughout this class. Even better, JupyterLab comes prepackaged with your Anaconda Python distribution.
### Launching JupyterLab
When you installed Anaconda, you also installed the Anaconda Navigator, an app that allows you to easily launch a JupyterLab instance. When you open up Anaconda Navigator, you should see a screen that looks like this,

where I have boxed in the JupyterLab prompt with a red box. Launch the JupyterLab IDE by clicking the 'launch' button. This should automatically open a browser window with the JupyterLab interface,

### Creating your course directory
During the course, you will be handing in the computational portions of your homeworks as Jupyter Notebooks and, as such, it will be important for the TA's to be able to run your code to grade it. We will often be reading in data from a file on your computer, manipulating it, and then plotting the outcome. **To ensure the TA's can run your code without manipulating it, you MUST use a specific file structure.** We can set up the file structure pretty easily directly through JupyterLab.
Open the side bar of the JupyterLab interface by clicking the folder icon on the left hand side of the screen. This will slide open a file browser like so:
<center>
<img src="filebrowser.png" width="50%">
</center>
Your files will look different than mine (unless you're using my computer!), but it will show the contents of your computer's `home` directory.
Using the sidebar, navigate to wherever you will want to make a new folder called `Scientific-Computing` and create it by clicking the "new folder" symbol.
Double-click the `Scientific-Computing` folder to open it and make two new folders, one named `code` and another `data`. Your final file directory should look like so:
<center>
<img src="directory_structure.png" width="50%">
</center>
That's it! You've now made the file structure for the class.
All of the Jupyter Notebooks you use in the course will be made and written in the `code` folder. All data you have to load will live in the `data` directory. This structure will make things easier for the TA when it comes to grading your work, but will also help you maintain a tidy homework folder.
### Starting A Jupyter Notebook
Let's open a new notebook. Navigate to your `code` folder and click the `+` in the sidebar. This will open a new "Launcher" window where a variety of new filetypes can be opened. One of them will be a "Python 3 Notebook".
<center>
<img src="launcher.png" width="50%">
</center>
Clicking this will open a new Jupyter Notebook named `Untitled.ipynb`.
<center>
<img src="notebook.png" width="50%">
</center>
Right-click the "Untitled.ipynb" in the sidebar and rename it to something more informative, say `testing_out_python.ipynb`.
The right-hand side of your screen is the actual notebook. You will see a "code cell" (grey rectangle) along with a bunch of other boxes above it. In the [Jupyter Notebook Tutorial](http://rpgroup.caltech.edu/bige105/tutorials/t0b/t0b_jupyter_notebooks) we cover these buttons in detail. For now, we'll just check to make sure you have a working Python distribution.
## `Hello, World`
Let's write our first bit of Python code to make sure that everything is working correctly on your system. In Jupyter Notebooks, all code is typed in grey rectangles called "code cells". When a cell is "run", the result of the computation is shown underneath the code cell. Double-click the code cell on the right-hand side of your JupyterLab window and type the following:
```
# This a comment and won't be read by Python. All comments start with `#`
print('Hello, World. Long time, no see. This sentence should be printed below by pressing `Shift + Enter` ')
```
Note that you cannot edit the text *below* the code cell. This is the output of the `print()` function in Python.
### Our First Plot
This class will often require you to generate plots of your computations coupled with some comments about your interpretation. Let's try to generate a simple plot here to make sure everything is working with your distribution. Don't worry too much about the syntax for right now. The basics of Python syntax are given in [Tutorial 0c](http://rpgroup.caltech.edu/bige105/tutorials/t0b/t0c_python_syntax_and_plotting).
Add a new code cell beneath the one that contains the `print()` statement above. When you execute a cell using `Shift + Enter`, a new cell should appear beneath what you just ran. If it's not there, you can make a new cell by clicking the `+` icon in the notebook menu bar. In the new cell, type the following:
```
# Import Python packages necessary for this script
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Generate a beautiful sinusoidal curve
x = np.linspace(0, 2*np.pi, 500)
y = np.sin(2 * np.sin(2 * np.sin(2 * x)))
plt.plot(x, y)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.show()
```
If you can see this plot in your notebook, then congratulations! You have a working Python 3.7 distribution.
### Installing extra packages using Conda
With the Anaconda Python distribution, you can install verified packages (scientific and non-scientific) through the [Conda](http://conda.pydata.org/docs/) package manager. **Note that you do not have to download Conda separately. This comes packaged with Anaconda**. To install packages through Conda, we must manually enter their names on the command line.
One of your first computational homeworks will involve doing some rudimentary bioinformatics to compare sequences of the `ENAM` gene among cetaceans. To do so, we will use the [BioPython](http://biopython.org) package which does not come prepackaged along with Anaconda. Let's install it using the command line that is built in with Jupyter Lab.
On the sidebar menu, open a new Launcher window by clicking the `+` button (just like we did to make a new Jupyter Notebook). Now, instead of opening a notebook, choose the "Terminal" selection at the bottom.
<center>
<img src="launch_terminal.png" width="50%">
</center>
This will open a new tab on the right-hand side of your screen and will launch a shell environment (yours may look different than mine). Click on the command line, type
```
conda install biopython
```
and hit enter. After a few seconds (or a minute, depending on your internet connection), you should be greeted with the following screen:
<center>
<img src="install_biopython.png" width="50%">
</center>
Note that at the bottom it asks for your permission to install the package and update its dependencies, if necessary. Type `y` and then hit enter. Biopython will then be installed.
| github_jupyter |
# CNN Image Data Preview & Statistics
### Welcome!
This notebook allows you to preview some of your single-cell image patches to make sure your annotated data are of good quality. You will also get a chance to calculate the statistics for your annotated data which can be useful for data preprocessing, e.g. *class imbalance check* prior to CNN training.
```
import os
import json
import random
import zipfile
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime
from skimage.io import imread
```
### Specify how many patches you'd like to visualise from your batch:
By default, the code below will allow you to see any 10 random patches per each class. If there is not enough training data for any label, a noisy image will be visualised. The default setting doesn't save the collage out, but you can change it by specifying the ```save_collage``` to ```True```.
```
LABELS = ["Interphase", "Prometaphase", "Metaphase", "Anaphase", "Apoptosis"]
patches_to_show = 10
save_collage = False
```
### Load a random 'annotation' zip file to check image patches:
```
zipfiles = [f for f in os.listdir("./") if f.startswith("annotation") and f.endswith(".zip")]
zip_file_name = zipfiles[0]
```
### Optional: specify which zip file you'd like to visualise:
```
#zip_file_name = "annotation_02-08-2021--10-33-59.zip"
```
### Process the zip file & extract subfolders with individual images:
```
# Make sure zip file name is stripped of '.zip' suffix:
if zip_file_name.endswith(".zip"):
zip_file_name = zip_file_name.split(".zip")[0]
# Check if the zipfile was extracted:
if not zip_file_name in os.listdir("./"):
print (f"Zip file {zip_file_name}.zip : Exporting...", end="\t")
with zipfile.ZipFile(f"./{zip_file_name}.zip", 'r') as zip_ref:
zip_ref.extractall(f"./{zip_file_name}/")
else:
print (f"Zip file {zip_file_name}.zip : Exported!...", end="\t")
print ("Done!")
```
### Plot the collage with all 5 labels:
```
fig, axs = plt.subplots(figsize=(int(len(LABELS)*5), int(patches_to_show*5)),
nrows=patches_to_show, ncols=len(LABELS),
sharex=True, sharey=True)
for idx in range(len(LABELS)):
label = LABELS[idx]
label_dr = f"./{zip_file_name}/{label}/"
# Check if directory exists:
if os.path.isdir(label_dr):
patch_list = os.listdir(label_dr)
random.shuffle(patch_list)
print (f"Label: {label} contains {len(patch_list)} single-cell image patches")
else:
patch_list = []
print (f"Label: {label} has not been annotated.")
# Plot the patches:
for i in range(patches_to_show):
# Set titles to individual columns
if i == 0:
axs[i][idx].set_title(f"Label: {label}", fontsize=16)
if i >= len(patch_list):
patch = np.random.randint(0,255,size=(64,64)).astype(np.uint8)
axs[i][idx].text(x=32, y=32, s="noise", size=50, rotation=30., ha="center", va="center",
bbox=dict(boxstyle="round", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0)))
else:
patch = plt.imread(label_dr + patch_list[i])
axs[i][idx].imshow(patch, cmap="binary_r")
axs[i][idx].axis('off')
if save_collage is True:
plt.savefig("../label_image_patches.png", bbox_to_inches='tight')
plt.show()
plt.close()
```
## Calculate some data statistics WITHOUT unzipping the files:
```
label_count = dict({'Prometaphase' : 0, 'Metaphase' : 0, 'Interphase' : 0, 'Anaphase' : 0, 'Apoptosis' : 0})
for f in tqdm(zipfiles):
archive = zipfile.ZipFile(f, 'r')
json_data = archive.read(f.split(".zip")[0] + ".json")
data = json.loads(json_data)
# Count instances per label:
counts = [[x, data['labels'].count(x)] for x in set(data['labels'])]
print (f"File: {f}\n\t{counts}")
# Add counts to label counter:
for lab in counts:
label_count[lab[0]] += lab[1]
```
### Plot the statistics:
```
COLOR_CYCLE = [
'#1f77b4', # blue
'#ff7f0e', # orange
'#2ca02c', # green
'#d62728', # red
'#9467bd', # purple
]
# Plot the bar graph:
plt.bar(range(len(label_count)), list(label_count.values()), align='center', color=COLOR_CYCLE)
plt.xticks(range(len(label_count)), list(label_count.keys()), rotation=30)
plt.title("Single-Cell Patches per Label")
plt.xlabel("Class Label")
plt.ylabel("Patch Count")
plt.grid(axis='y', alpha=0.3)
plt.show()
plt.close()
```
### Done!
| github_jupyter |
```
emails = ['[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',]
TEXT = """
Boa noite
Envio email na sequência do pedido de apoio por parte da vossa instituição.
Gostava apenas de saber se chegou a vossa instituição algum tipo de equipamento, tendo em conta que me foi dito que seriam enviadas viseiras durante esta semana.
Caso não tenham recebido nada, pedia que me alertassem para conseguir perceber o que se passa com os apoios que conseguimos agregar.
Obrigado pela atenção,
Gustavo.
"""
TEXT = """
Bom dia,
O meu nome é Gustavo Carita, tenho 27 anos, sou de Lisboa e sou engenheiro.
Recentemente apercebi-me que, na actual conjuntura, existe uma carência nacional de profissionais para apoiar IPSS-Instituicoes Privadas de Solidariedade Social e decidi meter mãos á obra para tentar ajudar.
Eu e mais uns amigos criámos um website para ajudar as IPSS, duma forma simples e eficaz.
Pode verificar o website em: https://voluntarios-covid19.pt/
Neste momento estamos a comunicar com todas as IPSS presentes na plataforma http://cartasocial.pt/.
Para divulgarem a ajuda que precisam nesta fase, basta preencher o seguinte formulário: https://forms.gle/nC2GNNMcW8pyXiYw7
Daremos o nosso melhor para promover as vossas iniciativas e obter toda a ajuda necessária.
Obrigado,
Gustavo.
<[email protected]>
"""
TO = '[email protected]'
SUBJECT = 'IPSS Trial'
# Gmail Sign In
gmail_sender = '[email protected]'
gmail_passwd = 'voluntariado123@'
def create_message(sender, to, subject, message_text):
"""Create a message for an email.
Args:
sender: Email address of the sender.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
Returns:
An object containing a base64url encoded email object.
"""
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
b64_bytes = base64.urlsafe_b64encode(message.as_bytes())
b64_string = b64_bytes.decode()
body = {'raw': b64_string}
return body
from email.mime.text import MIMEText
import base64
from googleapiclient.discovery import build
import pickle
from tqdm import tqdm
from time import sleep
import random
with open('../../ipss_mailing/token.pickle', 'rb') as token:
creds = pickle.load(token)
service = build('gmail', 'v1', credentials=creds)
def send_message(service, user_id, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
try:
message = (service.users().messages().send(userId=user_id, body=message)
.execute())
print('Message Id: %s' % message['id'])
return message
except Exception as e:
print(e)
temp = open('../../ipss_mailing/emails.txt', 'r').readlines()
temp = [email.replace('\n', '') for email in temp]
temp = [
'[email protected]',
'[email protected]'
] + emails
for t in tqdm(temp):
try:
send_message(service, "me", create_message(
'[email protected]',
t,
'Voluntarios COVID19 - Confirmação',
TEXT
))
sleep(random.randint(0,2))
except Exception as e:
print(e)
print(t)
```
| github_jupyter |
```
%%capture
!pip install openmined_psi
import syft as sy
duet = sy.join_duet(loopback=True)
import openmined_psi as psi
class PsiClientDuet:
def __init__(self, duet, timeout_secs=-1):
self.duet = duet
# get the reveal intersection flag and create a client
reveal_intersection_ptr = self.duet.store["reveal_intersection"]
reveal_intersection = reveal_intersection_ptr.get(
request_block=True,
name="reveal_intersection",
reason="Are we revealing or not?",
timeout_secs=timeout_secs,
delete_obj=True
)
self.reveal_intersection = reveal_intersection
self.client = psi.client.CreateWithNewKey(reveal_intersection)
# get the ServerSetup message
setup_ptr = self.duet.store["setup"]
self.setup = setup_ptr.get(
request_block=True,
name="setup",
reason="To get the server setup",
timeout_secs=timeout_secs,
delete_obj=True
)
def intersect(self, client_items, timeout_secs=-1):
# send the client request to the server
self.duet.requests.add_handler(
name="request",
action="accept"
)
request = self.client.CreateRequest(client_items)
request_ptr = request.tag("request").send(self.duet, searchable = True)
# block until a response is received from the server
while True:
try:
self.duet.store["response"]
except:
continue
break
# get the response from the server
response_ptr = self.duet.store["response"]
response = response_ptr.get(
request_block=True,
name="response",
reason="To get the response",
timeout_secs=timeout_secs,
delete_obj=True
)
# calculate the intersection
if self.reveal_intersection:
return self.client.GetIntersection(self.setup, response)
else:
return self.client.GetIntersectionSize(self.setup, response)
client_items = ["Element " + str(i) for i in range(1000)]
client = PsiClientDuet(duet)
intersection = client.intersect(client_items)
if client.reveal_intersection:
iset = set(intersection)
for idx in range(len(client_items)):
if idx % 2 == 0:
assert idx in iset
else:
assert idx not in iset
if not client.reveal_intersection:
assert intersection >= (len(client_items) / 2.0)
assert intersection <= (1.1 * len(client_items) / 2.0)
```
| github_jupyter |
# Encoding of categorical variables
In this notebook, we will present typical ways of dealing with
**categorical variables** by encoding them, namely **ordinal encoding** and
**one-hot encoding**.
Let's first load the entire adult dataset containing both numerical and
categorical data.
```
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
# drop the duplicated column `"education-num"` as stated in the first notebook
adult_census = adult_census.drop(columns="education-num")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name])
```
## Identify categorical variables
As we saw in the previous section, a numerical variable is a
quantity represented by a real or integer number. These variables can be
naturally handled by machine learning algorithms that are typically composed
of a sequence of arithmetic instructions such as additions and
multiplications.
In contrast, categorical variables have discrete values, typically
represented by string labels (but not only) taken from a finite list of
possible choices. For instance, the variable `native-country` in our dataset
is a categorical variable because it encodes the data using a finite list of
possible countries (along with the `?` symbol when this information is
missing):
```
data["native-country"].value_counts().sort_index()
```
How can we easily recognize categorical columns among the dataset? Part of
the answer lies in the columns' data type:
```
data.dtypes
```
If we look at the `"native-country"` column, we observe its data type is
`object`, meaning it contains string values.
## Select features based on their data type
In the previous notebook, we manually defined the numerical columns. We could
do a similar approach. Instead, we will use the scikit-learn helper function
`make_column_selector`, which allows us to select columns based on
their data type. We will illustrate how to use this helper.
```
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
categorical_columns
```
Here, we created the selector by passing the data type to include; we then
passed the input dataset to the selector object, which returned a list of
column names that have the requested data type. We can now filter out the
unwanted columns:
```
data_categorical = data[categorical_columns]
data_categorical.head()
print(f"The dataset is composed of {data_categorical.shape[1]} features")
```
In the remainder of this section, we will present different strategies to
encode categorical data into numerical data which can be used by a
machine-learning algorithm.
## Strategies to encode categories
### Encoding ordinal categories
The most intuitive strategy is to encode each category with a different
number. The `OrdinalEncoder` will transform the data in such manner.
We will start by encoding a single column to understand how the encoding
works.
```
from sklearn.preprocessing import OrdinalEncoder
education_column = data_categorical[["education"]]
encoder = OrdinalEncoder()
education_encoded = encoder.fit_transform(education_column)
education_encoded
```
We see that each category in `"education"` has been replaced by a numeric
value. We could check the mapping between the categories and the numerical
values by checking the fitted attribute `categories_`.
```
encoder.categories_
```
Now, we can check the encoding applied on all categorical features.
```
data_encoded = encoder.fit_transform(data_categorical)
data_encoded[:5]
print(
f"The dataset encoded contains {data_encoded.shape[1]} features")
```
We see that the categories have been encoded for each feature (column)
independently. We also note that the number of features before and after the
encoding is the same.
However, be careful when applying this encoding strategy:
using this integer representation leads downstream predictive models
to assume that the values are ordered (0 < 1 < 2 < 3... for instance).
By default, `OrdinalEncoder` uses a lexicographical strategy to map string
category labels to integers. This strategy is arbitrary and often
meaningless. For instance, suppose the dataset has a categorical variable
named `"size"` with categories such as "S", "M", "L", "XL". We would like the
integer representation to respect the meaning of the sizes by mapping them to
increasing integers such as `0, 1, 2, 3`.
However, the lexicographical strategy used by default would map the labels
"S", "M", "L", "XL" to 2, 1, 0, 3, by following the alphabetical order.
The `OrdinalEncoder` class accepts a `categories` constructor argument to
pass categories in the expected ordering explicitly. You can find more
information in the
[scikit-learn documentation](https://scikit-learn.org/stable/modules/preprocessing.html#encoding-categorical-features)
if needed.
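As a small illustration, here is a sketch of that option using the made-up `"size"` feature mentioned above (it is not a column of the adult census dataset):
```
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd

sizes = pd.DataFrame({"size": ["S", "XL", "M", "L", "S"]})

# the explicit ordering fixes the mapping: S -> 0, M -> 1, L -> 2, XL -> 3
size_encoder = OrdinalEncoder(categories=[["S", "M", "L", "XL"]])
size_encoder.fit_transform(sizes)
```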
If a categorical variable does not carry any meaningful order information
then this encoding might be misleading to downstream statistical models and
you might consider using one-hot encoding instead (see below).
### Encoding nominal categories (without assuming any order)
`OneHotEncoder` is an alternative encoder that prevents the downstream
models to make a false assumption about the ordering of categories. For a
given feature, it will create as many new columns as there are possible
categories. For a given sample, the value of the column corresponding to the
category will be set to `1` while all the columns of the other categories
will be set to `0`.
We will start by encoding a single feature (e.g. `"education"`) to illustrate
how the encoding works.
```
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False)
education_encoded = encoder.fit_transform(education_column)
education_encoded
```
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;">Note</p>
<p><tt class="docutils literal">sparse=False</tt> is used in the <tt class="docutils literal">OneHotEncoder</tt> for didactic purposes, namely
easier visualization of the data.</p>
<p class="last">Sparse matrices are efficient data structures when most of your matrix
elements are zero. They won't be covered in detail in this course. If you
want more details about them, you can look at
<a class="reference external" href="https://scipy-lectures.org/advanced/scipy_sparse/introduction.html#why-sparse-matrices">this</a>.</p>
</div>
We see that encoding a single feature will give a NumPy array full of zeros
and ones. We can get a better understanding using the associated feature
names resulting from the transformation.
```
feature_names = encoder.get_feature_names_out(input_features=["education"])
education_encoded = pd.DataFrame(education_encoded, columns=feature_names)
education_encoded
```
As we can see, each category (unique value) became a column; the encoding
returned, for each sample, a 1 to specify which category it belongs to.
Let's apply this encoding on the full dataset.
```
print(
f"The dataset is composed of {data_categorical.shape[1]} features")
data_categorical.head()
data_encoded = encoder.fit_transform(data_categorical)
data_encoded[:5]
print(
f"The encoded dataset contains {data_encoded.shape[1]} features")
```
Let's wrap this NumPy array in a dataframe with informative column names as
provided by the encoder object:
```
columns_encoded = encoder.get_feature_names_out(data_categorical.columns)
pd.DataFrame(data_encoded, columns=columns_encoded).head()
```
Look at how the `"workclass"` variable of the 3 first records has been
encoded and compare this to the original string representation.
The number of features after the encoding is more than 10 times larger than
in the original data because some variables such as `occupation` and
`native-country` have many possible categories.
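One quick way to see which features are responsible for this growth is to count the unique categories per column, for instance with the small check below:
```
# number of unique categories per categorical column
data_categorical.nunique().sort_values(ascending=False)
```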
### Choosing an encoding strategy
Choosing an encoding strategy will depend on the underlying models and the
type of categories (i.e. ordinal vs. nominal).
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;">Note</p>
<p class="last">In general <tt class="docutils literal">OneHotEncoder</tt> is the encoding strategy used when the
downstream models are <strong>linear models</strong> while <tt class="docutils literal">OrdinalEncoder</tt> is often a
good strategy with <strong>tree-based models</strong>.</p>
</div>
Using an `OrdinalEncoder` will output ordinal categories. This means
that there is an order in the resulting categories (e.g. `0 < 1 < 2`). The
impact of violating this ordering assumption is really dependent on the
downstream models. Linear models will be impacted by misordered categories
while tree-based models will not.
You can still use an `OrdinalEncoder` with linear models but you need to be
sure that:
- the original categories (before encoding) have an ordering;
- the encoded categories follow the same ordering than the original
categories.
The **next exercise** highlights the issue of misusing `OrdinalEncoder` with
a linear model.
One-hot encoding categorical variables with high cardinality can cause
computational inefficiency in tree-based models. Because of this, it is not recommended
to use `OneHotEncoder` in such cases even if the original categories do not
have a given order. We will show this in the **final exercise** of this sequence.
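As a sketch of the alternative discussed above (not the exact pipeline of the final exercise), a tree-based model is typically combined with an `OrdinalEncoder`; note that on older scikit-learn versions `HistGradientBoostingClassifier` requires the experimental import shown in the comment:
```
from sklearn.ensemble import HistGradientBoostingClassifier
# on older scikit-learn versions you may need:
# from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder

# a single column per categorical feature, regardless of its cardinality
tree_model = make_pipeline(
    OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
    HistGradientBoostingClassifier(),
)
```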
## Evaluate our predictive pipeline
We can now integrate this encoder inside a machine learning pipeline like we
did with numerical data: let's train a linear classifier on the encoded data
and check the generalization performance of this machine learning pipeline using
cross-validation.
Before we create the pipeline, we have to linger on the `native-country`.
Let's recall some statistics regarding this column.
```
data["native-country"].value_counts()
```
We see that the `Holand-Netherlands` category is occurring rarely. This will
be a problem during cross-validation: if the sample ends up in the test set
during splitting then the classifier would not have seen the category during
training and will not be able to encode it.
In scikit-learn, there are two solutions to bypass this issue:
* list all the possible categories and provide it to the encoder via the
keyword argument `categories`;
* use the parameter `handle_unknown`.
Here, we will use the latter solution for simplicity.
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Tip</p>
<p class="last">Be aware the <tt class="docutils literal">OrdinalEncoder</tt> exposes as well a parameter
<tt class="docutils literal">handle_unknown</tt>. It can be set to <tt class="docutils literal">use_encoded_value</tt> and by setting
<tt class="docutils literal">unknown_value</tt> to handle rare categories. You are going to use these
parameters in the next exercise.</p>
</div>
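To make these parameters concrete, here is a toy sketch (the tiny dataframes below are invented for illustration): an `OrdinalEncoder` configured this way maps a category unseen during `fit` to the chosen `unknown_value` instead of raising an error.
```
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd

train_countries = pd.DataFrame({"native-country": ["United-States", "France", "India"]})
new_countries = pd.DataFrame({"native-country": ["Holand-Netherlands"]})  # unseen at fit time

ordinal_encoder = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
ordinal_encoder.fit(train_countries)
ordinal_encoder.transform(new_countries)  # the unseen category is encoded as -1
```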
We can now create our machine learning pipeline.
```
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"), LogisticRegression(max_iter=500)
)
```
<div class="admonition note alert alert-info">
<p class="first admonition-title" style="font-weight: bold;">Note</p>
<p class="last">Here, we need to increase the maximum number of iterations to obtain a fully
converged <tt class="docutils literal">LogisticRegression</tt> and silence a <tt class="docutils literal">ConvergenceWarning</tt>. Contrary
to the numerical features, the one-hot encoded categorical features are all
on the same scale (values are 0 or 1), so they would not benefit from
scaling. In this case, increasing <tt class="docutils literal">max_iter</tt> is the right thing to do.</p>
</div>
Finally, we can check the model's generalization performance only using the
categorical columns.
```
from sklearn.model_selection import cross_validate
cv_results = cross_validate(model, data_categorical, target)
cv_results
scores = cv_results["test_score"]
print(f"The accuracy is: {scores.mean():.3f} +/- {scores.std():.3f}")
```
As you can see, this representation of the categorical variables is
slightly more predictive of the revenue than the numerical variables
that we used previously.
In this notebook we have:
* seen two common strategies for encoding categorical features: **ordinal
encoding** and **one-hot encoding**;
* used a **pipeline** to use a **one-hot encoder** before fitting a logistic
regression.
| github_jupyter |
# Box Plots
The following illustrates some options for box plots in statsmodels, including `violinplot` and `beanplot`.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
```
## Bean Plots
The following example is taken from the docstring of `beanplot`.
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
```
data = sm.datasets.anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
```
Group age by party ID, and create a bean plot from it:
```
plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
plt.rcParams['figure.figsize'] = (10.0, 8.0) # make plot larger in notebook
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30}
sm.graphics.beanplot(age, ax=ax, labels=labels,
plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
#plt.show()
def beanplot(data, plot_opts={}, jitter=False):
"""helper function to try out different plot options
"""
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts_ = {'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30}
plot_opts_.update(plot_opts)
sm.graphics.beanplot(data, ax=ax, labels=labels,
jitter=jitter, plot_opts=plot_opts_)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
fig = beanplot(age, jitter=True)
fig = beanplot(age, plot_opts={'violin_width': 0.5, 'violin_fc':'#66c2a5'})
fig = beanplot(age, plot_opts={'violin_fc':'#66c2a5'})
fig = beanplot(age, plot_opts={'bean_size': 0.2, 'violin_width': 0.75, 'violin_fc':'#66c2a5'})
fig = beanplot(age, jitter=True, plot_opts={'violin_fc':'#66c2a5'})
fig = beanplot(age, jitter=True, plot_opts={'violin_width': 0.5, 'violin_fc':'#66c2a5'})
```
## Advanced Box Plots
Based on the example script `example_enhanced_boxplots.py` (by Ralf Gommers).
```
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Necessary to make horizontal axis labels fit
plt.rcParams['figure.subplot.bottom'] = 0.23
data = sm.datasets.anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
# Group age by party ID.
age = [data.exog['age'][data.endog == id] for id in party_ID]
# Create a violin plot.
fig = plt.figure()
ax = fig.add_subplot(111)
sm.graphics.violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create a bean plot.
fig2 = plt.figure()
ax = fig2.add_subplot(111)
sm.graphics.beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create a jitter plot.
fig3 = plt.figure()
ax = fig3.add_subplot(111)
plot_opts={'cutoff_val':5, 'cutoff_type':'abs', 'label_fontsize':'small',
'label_rotation':30, 'violin_fc':(0.8, 0.8, 0.8),
'jitter_marker':'.', 'jitter_marker_size':3, 'bean_color':'#FF6F00',
'bean_mean_color':'#009D91'}
sm.graphics.beanplot(age, ax=ax, labels=labels, jitter=True,
plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create an asymmetrical jitter plot.
ix = data.exog['income'] < 16 # incomes < $30k
age = data.exog['age'][ix]
endog = data.endog[ix]
age_lower_income = [age[endog == id] for id in party_ID]
ix = data.exog['income'] >= 20 # incomes > $50k
age = data.exog['age'][ix]
endog = data.endog[ix]
age_higher_income = [age[endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts['violin_fc'] = (0.5, 0.5, 0.5)
plot_opts['bean_show_mean'] = False
plot_opts['bean_show_median'] = False
plot_opts['bean_legend_text'] = 'Income < \$30k'
plot_opts['cutoff_val'] = 10
sm.graphics.beanplot(age_lower_income, ax=ax, labels=labels, side='left',
jitter=True, plot_opts=plot_opts)
plot_opts['violin_fc'] = (0.7, 0.7, 0.7)
plot_opts['bean_color'] = '#009D91'
plot_opts['bean_legend_text'] = 'Income > \$50k'
sm.graphics.beanplot(age_higher_income, ax=ax, labels=labels, side='right',
jitter=True, plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Show all plots.
#plt.show()
```
| github_jupyter |
# TEST for matrix_facto_10_embeddings_100_epochs
# Deep recommender on top of Amazon's Clean Clothing Shoes and Jewelry explicit rating dataset
Frame the recommendation system as a rating prediction machine learning problem and create a hybrid architecture that mixes the collaborative and content-based filtering approaches:
- Collaborative part: predict item ratings in order to recommend to the user items that they are likely to rate highly.
- Content-based part: use metadata inputs (such as price and title) about items to find similar items to recommend.
### - Create 2 explicit recommendation engine models based on 2 machine learning architecture using Keras:
1. a matrix factorization model
2. a deep neural network model.
### Compare the results of the different models and configurations to find the best-performing model
### Use the best model to recommend items to users
```
### name of model
modname = 'matrix_facto_10_embeddings_100_epochs'
### number of epochs
num_epochs = 100
### size of embedding
embedding_size = 10
# import sys
# !{sys.executable} -m pip install --upgrade pip
# !{sys.executable} -m pip install sagemaker-experiments
# !{sys.executable} -m pip install pandas
# !{sys.executable} -m pip install numpy
# !{sys.executable} -m pip install matplotlib
# !{sys.executable} -m pip install boto3
# !{sys.executable} -m pip install sagemaker
# !{sys.executable} -m pip install pyspark
# !{sys.executable} -m pip install ipython-autotime
# !{sys.executable} -m pip install surprise
# !{sys.executable} -m pip install smart_open
# !{sys.executable} -m pip install pyarrow
# !{sys.executable} -m pip install fastparquet
# Check Jave version
# !sudo yum -y update
# # Need to use Java 1.8.0
# !sudo yum remove jre-1.7.0-openjdk -y
!java -version
# !sudo update-alternatives --config java
# !pip install pyarrow fastparquet
# !pip install ipython-autotime
# !pip install tqdm pydot pydotplus pydot_ng
#### To measure all running time
# https://github.com/cpcloud/ipython-autotime
%load_ext autotime
%pylab inline
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
import re
import seaborn as sbn
import nltk
import tqdm as tqdm
import sqlite3
import pandas as pd
import numpy as np
from pandas import DataFrame
import string
import pydot
import pydotplus
import pydot_ng
import pickle
import time
import gzip
import os
os.getcwd()
import matplotlib.pyplot as plt
from math import floor,ceil
#from nltk.corpus import stopwords
#stop = stopwords.words("english")
from nltk.stem.porter import PorterStemmer
english_stemmer=nltk.stem.SnowballStemmer('english')
from nltk.tokenize import word_tokenize
from sklearn.metrics import accuracy_score, confusion_matrix,roc_curve, auc,classification_report, mean_squared_error, mean_absolute_error
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.svm import LinearSVC
from sklearn.neighbors import NearestNeighbors
from sklearn.linear_model import LogisticRegression
from sklearn import neighbors
from scipy.spatial.distance import cosine
from sklearn.feature_selection import SelectKBest
from IPython.display import SVG
# Tensorflow
import tensorflow as tf
#Keras
from keras.models import Sequential, Model, load_model, save_model
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Embedding
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
from keras.layers.core import Reshape, Dropout, Dense
from keras.layers.merge import Multiply, Dot, Concatenate
from keras.layers.embeddings import Embedding
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.utils.vis_utils import model_to_dot
```
### Set and Check GPUs
```
#Session
from keras import backend as K
def set_check_gpu():
cfg = K.tf.ConfigProto()
cfg.gpu_options.per_process_gpu_memory_fraction =1 # allow all of the GPU memory to be allocated
# for 8 GPUs
# cfg.gpu_options.visible_device_list = "0,1,2,3,4,5,6,7" # "0,1"
# for 1 GPU
cfg.gpu_options.visible_device_list = "0"
#cfg.gpu_options.allow_growth = True # # Don't pre-allocate memory; dynamically allocate the memory used on the GPU as-needed
#cfg.log_device_placement = True # to log device placement (on which device the operation ran)
sess = K.tf.Session(config=cfg)
K.set_session(sess) # set this TensorFlow session as the default session for Keras
print("* TF version: ", [tf.__version__, tf.test.is_gpu_available()])
print("* List of GPU(s): ", tf.config.experimental.list_physical_devices() )
print("* Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
# set for 8 GPUs
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7";
# set for 1 GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0";
# Tf debugging option
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
# print(tf.config.list_logical_devices('GPU'))
print(tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
set_check_gpu()
# reset GPU memory& Keras Session
def reset_keras():
try:
del classifier
del model
except:
pass
K.clear_session()
K.get_session().close()
# sess = K.get_session()
cfg = K.tf.ConfigProto()
cfg.gpu_options.per_process_gpu_memory_fraction
# cfg.gpu_options.visible_device_list = "0,1,2,3,4,5,6,7" # "0,1"
cfg.gpu_options.visible_device_list = "0" # "0,1"
cfg.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
sess = K.tf.Session(config=cfg)
K.set_session(sess) # set this TensorFlow session as the default session for Keras
```
## Load dataset and analysis using Spark
## Download and prepare Data:
#### 1. Read the data:
#### Read the data from the Amazon reviews dataset.
#### Use the dataset in which all users and items have at least 5 reviews.
### Location of dataset: https://nijianmo.github.io/amazon/index.html
```
import pandas as pd
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
from sagemaker.analytics import ExperimentAnalytics
import gzip
import json
from pyspark.ml import Pipeline
from pyspark.sql.types import StructField, StructType, StringType, DoubleType
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler
from pyspark.sql.functions import *
# spark imports
from pyspark.sql import SparkSession
from pyspark.sql.functions import UserDefinedFunction, explode, desc
from pyspark.sql.types import StringType, ArrayType
from pyspark.ml.evaluation import RegressionEvaluator
import os
import pandas as pd
import pyarrow
import fastparquet
# from pandas_profiling import ProfileReport
# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/Clean_Clothing_Shoes_and_Jewelry_5_clean.parquet ./data/
!ls -alh ./data
```
### Read the cleaned dataset from parquet files
```
review_data = pd.read_parquet("./data/Clean_Clothing_Shoes_and_Jewelry_5_clean.parquet")
review_data[:3]
review_data.shape
```
### 2. Arrange and clean the data
Rearrange the columns by relevance and rename column names
```
review_data.columns
review_data = review_data[['asin', 'image', 'summary', 'reviewText', 'overall', 'reviewerID', 'reviewerName', 'reviewTime']]
review_data.rename(columns={ 'overall': 'score','reviewerID': 'user_id', 'reviewerName': 'user_name'}, inplace=True)
#the variables names after rename in the modified data frame
list(review_data)
```
# Add Metadata
### Metadata includes descriptions, price, sales-rank, brand info, and co-purchasing links
- asin - ID of the product, e.g. 0000031852
- title - name of the product
- price - price in US dollars (at time of crawl)
- imUrl - url of the product image
- related - related products (also bought, also viewed, bought together, buy after viewing)
- salesRank - sales rank information
- brand - brand name
- categories - list of categories the product belongs to
```
# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/Cleaned_meta_Clothing_Shoes_and_Jewelry.parquet ./data/
all_info = pd.read_parquet("./data/Cleaned_meta_Clothing_Shoes_and_Jewelry.parquet")
all_info.head(n=5)
```
### Arrange and clean the data
- Cleaning, handling missing data, normalization, etc.
- For the Keras algorithm to work, remap all item_ids and user_ids to an integer between 0 and the total number of users or the total number of items
```
all_info.columns
items = all_info.asin.unique()
item_map = {i:val for i,val in enumerate(items)}
inverse_item_map = {val:i for i,val in enumerate(items)}
all_info["old_item_id"] = all_info["asin"] # copying for join with metadata
all_info["item_id"] = all_info["asin"].map(inverse_item_map)
items = all_info.item_id.unique()
print ("We have %d unique items in metadata "%items.shape[0])
all_info['description'] = all_info['description'].fillna(all_info['title'].fillna('no_data'))
all_info['title'] = all_info['title'].fillna(all_info['description'].fillna('no_data').apply(str).str[:20])
all_info['image'] = all_info['image'].fillna('no_data')
all_info['price'] = pd.to_numeric(all_info['price'],errors="coerce")
all_info['price'] = all_info['price'].fillna(all_info['price'].median())
users = review_data.user_id.unique()
user_map = {i:val for i,val in enumerate(users)}
inverse_user_map = {val:i for i,val in enumerate(users)}
review_data["old_user_id"] = review_data["user_id"]
review_data["user_id"] = review_data["user_id"].map(inverse_user_map)
items_reviewed = review_data.asin.unique()
review_data["old_item_id"] = review_data["asin"] # copying for join with metadata
review_data["item_id"] = review_data["asin"].map(inverse_item_map)
items_reviewed = review_data.item_id.unique()
users = review_data.user_id.unique()
print ("We have %d unique users"%users.shape[0])
print ("We have %d unique items reviewed"%items_reviewed.shape[0])
# We have 192403 unique users in the "small" dataset
# We have 63001 unique items reviewed in the "small" dataset
review_data.head(3)
```
## Adding the review count and average rating to the metadata
```
#items_nb = review_data['old_item_id'].value_counts().reset_index()
items_avg = review_data.drop(['summary','reviewText','user_id','asin','user_name','reviewTime','old_user_id','item_id'],axis=1).groupby('old_item_id').agg(['count','mean']).reset_index()
items_avg.columns= ['old_item_id','num_ratings','avg_rating']
#items_avg.head(5)
items_avg['num_ratings'].describe()
all_info = pd.merge(all_info,items_avg,how='left',left_on='asin',right_on='old_item_id')
pd.set_option('display.max_colwidth', 100)
all_info.head(2)
```
# Explicit feedback (Reviewed Dataset) Recommender System
### Explicit feedback is when users voluntarily give rating information about what they like and dislike.
- In this case, I have explicit item ratings ranging from one to five.
- Framed the recommendation system as a rating prediction machine learning problem:
- Predict an item's rating in order to recommend to a user an item that they are likely to rate highly if they buy it.
### To evaluate the model, I randomly separate the data into a training and test set.
```
ratings_train, ratings_test = train_test_split( review_data, test_size=0.1, random_state=0)
ratings_train.shape
ratings_test.shape
```
## Adding Metadata to the train set
Create an architecture that mixes the collaborative and content-based filtering approaches:
- Collaborative part: predict item ratings to recommend to the user items that they are likely to rate highly according to the learnt item and user embeddings (learn similarity from interactions).
- Content-based part: use metadata inputs (such as price and title) about items to recommend to the user content similar to what they rated highly (learn similarity of item attributes).
#### Adding the title and price - Add the metadata of the items in the training and test datasets.
```
# # creating metadata mappings
# titles = all_info['title'].unique()
# titles_map = {i:val for i,val in enumerate(titles)}
# inverse_titles_map = {val:i for i,val in enumerate(titles)}
# price = all_info['price'].unique()
# price_map = {i:val for i,val in enumerate(price)}
# inverse_price_map = {val:i for i,val in enumerate(price)}
# print ("We have %d prices" %price.shape)
# print ("We have %d titles" %titles.shape)
# all_info['price_id'] = all_info['price'].map(inverse_price_map)
# all_info['title_id'] = all_info['title'].map(inverse_titles_map)
# # creating dict from
# item2prices = {}
# for val in all_info[['item_id','price_id']].dropna().drop_duplicates().iterrows():
# item2prices[val[1]["item_id"]] = val[1]["price_id"]
# item2titles = {}
# for val in all_info[['item_id','title_id']].dropna().drop_duplicates().iterrows():
# item2titles[val[1]["item_id"]] = val[1]["title_id"]
# # populating the rating dataset with item metadata info
# ratings_train["price_id"] = ratings_train["item_id"].map(lambda x : item2prices[x])
# ratings_train["title_id"] = ratings_train["item_id"].map(lambda x : item2titles[x])
# # populating the test dataset with item metadata info
# ratings_test["price_id"] = ratings_test["item_id"].map(lambda x : item2prices[x])
# ratings_test["title_id"] = ratings_test["item_id"].map(lambda x : item2titles[x])
```
## create rating train/test dataset and upload into S3
```
# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/ratings_test.parquet ./data/
# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/ratings_train.parquet ./data/
ratings_test = pd.read_parquet('./data/ratings_test.parquet')
ratings_train = pd.read_parquet('./data/ratings_train.parquet')
ratings_train[:3]
ratings_train.shape
```
# Define embeddings
### The $\underline{embeddings}$ are low-dimensional hidden representations of users and items,
### i.e. for each item I can find its properties, and for each user I can encode how much they like those properties, so the attitudes or preferences of users are captured by a small number of hidden factors.
### Throughout training, I learn two new low-dimensional dense representations: one embedding for the users and another one for the items.
```
price = all_info['price'].unique()
titles = all_info['title'].unique()
```
# 1. Matrix factorization approach

```
# declare input embeddings to the model
# User input
user_id_input = Input(shape=[1], name='user')
# Item Input
item_id_input = Input(shape=[1], name='item')
price_id_input = Input(shape=[1], name='price')
title_id_input = Input(shape=[1], name='title')
# define the size of embeddings as a parameter
# Check 5, 10 , 15, 20, 50
user_embedding_size = embedding_size
item_embedding_size = embedding_size
price_embedding_size = embedding_size
title_embedding_size = embedding_size
# apply an embedding layer to all inputs
user_embedding = Embedding(output_dim=user_embedding_size, input_dim=users.shape[0],
input_length=1, name='user_embedding')(user_id_input)
item_embedding = Embedding(output_dim=item_embedding_size, input_dim=items_reviewed.shape[0],
input_length=1, name='item_embedding')(item_id_input)
price_embedding = Embedding(output_dim=price_embedding_size, input_dim=price.shape[0],
input_length=1, name='price_embedding')(price_id_input)
title_embedding = Embedding(output_dim=title_embedding_size, input_dim=titles.shape[0],
input_length=1, name='title_embedding')(title_id_input)
# reshape from shape (batch_size, input_length,embedding_size) to (batch_size, embedding_size).
user_vecs = Reshape([user_embedding_size])(user_embedding)
item_vecs = Reshape([item_embedding_size])(item_embedding)
price_vecs = Reshape([price_embedding_size])(price_embedding)
title_vecs = Reshape([title_embedding_size])(title_embedding)
```
### Matrix Factorisation works on the principle that we can learn the user and the item embeddings, and then predict the rating for each user-item by performing a dot (or scalar) product between the respective user and item embedding.
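As a toy illustration of that principle (the numbers below are made up and independent of the Keras model that follows), the predicted rating is just the scalar product of one user vector with one item vector:
```
import numpy as np

user_vec = np.array([0.9, 0.1, 0.3])    # hypothetical learnt user embedding
item_vec = np.array([0.8, 0.0, 0.5])    # hypothetical learnt item embedding
predicted_rating = user_vec @ item_vec  # 0.72 + 0.00 + 0.15 = 0.87
```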
```
# Applying matrix factorization: declare the output as being the dot product between the two embeddings: items and users
y = Dot(1, normalize=False)([user_vecs, item_vecs])
!mkdir -p ./models
# create model
model = Model(inputs=
[
user_id_input,
item_id_input
],
outputs=y)
# compile model
model.compile(loss='mse',
optimizer="adam" )
# set save location for model
save_path = "./models"
thename = save_path + '/' + modname + '.h5'
mcheck = ModelCheckpoint(thename, monitor='val_loss', save_best_only=True)
# fit model
history = model.fit([ratings_train["user_id"]
, ratings_train["item_id"]
]
, ratings_train["score"]
, batch_size=64
, epochs=num_epochs
, validation_split=0.2
, callbacks=[mcheck]
, shuffle=True)
# Save the fitted model history to a file (make sure the target directory exists first)
!mkdir -p ./histories
with open('./histories/' + modname + '.pkl', 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
print("Save history in ", './histories/' + modname + '.pkl')
def disp_model(path,file,suffix):
model = load_model(path+file+suffix)
## Summarise the model
model.summary()
    # Extract the learnt user and item embeddings: one table per embedding layer,
    # with one row per user/item and one column per trained embedding dimension.
    # In our case, the embeddings correspond exactly to the weights of the model:
    weights = model.get_weights()
    print("embeddings / weights shapes", [w.shape for w in weights])
return model
model_path = "./models/"
def plt_pickle(path,file,suffix):
with open(path+file+suffix , 'rb') as file_pi:
thepickle= pickle.load(file_pi)
plot(thepickle["loss"],label ='Train Error ' + file,linestyle="--")
plot(thepickle["val_loss"],label='Validation Error ' + file)
plt.legend()
plt.xlabel("Epoch")
plt.ylabel("Error")
##plt.ylim(0, 0.1)
return pd.DataFrame(thepickle,columns =['loss','val_loss'])
hist_path = "./histories/"
model=disp_model(model_path, modname, '.h5')
# Display the model using keras
SVG(model_to_dot(model).create(prog='dot', format='svg'))
x=plt_pickle(hist_path, modname, '.pkl')
x.head(20).transpose()
```
| github_jupyter |
# Description
This notebook requests the computation of an average time series of a WaPOR data layer over an area using the WaPOR API.
You will need a WaPOR API token to use this notebook.
# Step 1: Read APIToken
Get your API token from https://wapor.apps.fao.org/profile and enter it when running the cell below.
```
import requests
import pandas as pd
path_query=r'https://io.apps.fao.org/gismgr/api/v1/query/'
path_sign_in=r'https://io.apps.fao.org/gismgr/api/v1/iam/sign-in/'
APIToken=input('Your API token: ')
```
# Step 2: Get Authorization AccessToken
Use the API token entered above to obtain an AccessToken for authorization.
```
resp_signin=requests.post(path_sign_in,headers={'X-GISMGR-API-KEY':APIToken})
resp_signin = resp_signin.json()
AccessToken=resp_signin['response']['accessToken']
AccessToken
```
# Step 3: Write Query Payload
For more examples of the AreaStatsTimeSeries query payload,
visit https://io.apps.fao.org/gismgr/api/v1/swagger-ui/examples/AreaStatsTimeSeries.txt
```
crs="EPSG:4326" #coordinate reference system
cube_code="L1_PCP_E"
workspace='WAPOR_2'
start_date="2009-01-01"
end_date="2019-01-01"
#get datacube measure
cube_url=f'https://io.apps.fao.org/gismgr/api/v1/catalog/workspaces/{workspace}/cubes/{cube_code}/measures'
resp=requests.get(cube_url).json()
measure=resp['response']['items'][0]['code']
print('MEASURE: ',measure)
#get datacube time dimension
cube_url=f'https://io.apps.fao.org/gismgr/api/v1/catalog/workspaces/{workspace}/cubes/{cube_code}/dimensions'
resp=requests.get(cube_url).json()
items=pd.DataFrame.from_dict(resp['response']['items'])
dimension=items[items.type=='TIME']['code'].values[0]
print('DIMENSION: ',dimension)
```
## Define area by coordinate extent
```
bbox= [37.95883206252312, 7.89534, 43.32093, 12.3873979377346] #latlon
xmin,ymin,xmax,ymax=bbox[0],bbox[1],bbox[2],bbox[3]
Polygon=[
[xmin,ymin],
[xmin,ymax],
[xmax,ymax],
[xmax,ymin],
[xmin,ymin]
]
query_areatimeseries={
"type": "AreaStatsTimeSeries",
"params": {
"cube": {
"code": cube_code, #cube_code
"workspaceCode": workspace, #workspace code: use WAPOR for v1.0 and WAPOR_2 for v2.1
"language": "en"
},
"dimensions": [
{
"code": dimension, #use DAY DEKAD MONTH or YEAR
"range": f"[{start_date},{end_date})" #start date and endate
}
],
"measures": [
measure
],
"shape": {
"type": "Polygon",
"properties": {
"name": crs #coordinate reference system
},
"coordinates": [
Polygon
]
}
}
}
query_areatimeseries
```
## OR define the area by reading a shapefile (exported to GeoJSON)
```
import ogr
shp_fh=r".\data\Awash_shapefile.shp"
shpfile=ogr.Open(shp_fh)
layer=shpfile.GetLayer()
epsg_code=layer.GetSpatialRef().GetAuthorityCode(None)
shape=layer.GetFeature(0).ExportToJson(as_object=True)['geometry'] #get geometry of shapefile in JSON string
shape["properties"]={"name": "EPSG:{0}".format(epsg_code)}#latlon projection
query_areatimeseries={
"type": "AreaStatsTimeSeries",
"params": {
"cube": {
"code": cube_code,
"workspaceCode": workspace,
"language": "en"
},
"dimensions": [
{
"code": dimension,
"range": f"[{start_date},{end_date})"
}
],
"measures": [
measure
],
"shape": shape
}
}
query_areatimeseries
```
# Step 4: Post the QueryPayload with AccessToken in Header
The response contains a URL for querying the status of the job.
```
resp_query=requests.post(path_query,headers={'Authorization':'Bearer {0}'.format(AccessToken)},
json=query_areatimeseries)
resp_query = resp_query.json()
job_url=resp_query['response']['links'][0]['href']
job_url
```
# Step 5: Get Job Results
It will take some time for the job to finish. When it does, its status changes from 'RUNNING' to 'COMPLETED' or 'COMPLETED WITH ERRORS'. If it is COMPLETED, the area time series results can be retrieved from the response's 'output'.
```
i=0
print('RUNNING',end=" ")
while i==0:
resp = requests.get(job_url)
resp=resp.json()
if resp['response']['status']=='RUNNING':
print('.',end =" ")
if resp['response']['status']=='COMPLETED':
results=resp['response']['output']
df=pd.DataFrame(results['items'],columns=results['header'])
i=1
if resp['response']['status']=='COMPLETED WITH ERRORS':
print(resp['response']['log'])
i=1
df
df.index=pd.to_datetime(df.day,format='%Y-%m-%d')
df.plot()
```
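A small optional variant of the polling loop above (the 10-second interval is an arbitrary choice): it waits between requests instead of querying the API in a tight loop, and only illustrates the waiting pattern; result handling stays as in the cell above.
```
import time

while True:
    resp = requests.get(job_url).json()
    if resp['response']['status'] != 'RUNNING':
        break
    time.sleep(10)  # arbitrary polling interval
```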
| github_jupyter |
```
import os
os.chdir("C:\\Users\\Pieter-Jan\\Documents\\Work\\Candriam\\nlp\\ESG\\top2Vec\\TopicModelling")
from modules import Top2Vec_custom
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import pickle
import plotly.express as px
%reload_ext autoreload
%autoreload 2
df = pd.read_csv("data\\CRS_processed_PyMuPDF_REIT-Industrial.txt", sep="\t")
df.shape
df.head(2)
paragraphs = df["paragraph"].values.tolist()
# %%time
# model_distilBert = Top2Vec_custom.Top2Vec(
# documents=paragraphs,
# embedding_model='distiluse-base-multilingual-cased',
# load_doc_embed=False,
# save_doc_embed=True,
# path_doc_embed="output/distBert_embedding_REIT-Industrial.npy"
# )
# %%time
# model_distilBert = Top2Vec_custom.Top2Vec(
# documents=paragraphs,
# embedding_model='distiluse-base-multilingual-cased',
# load_doc_embed=True,
#     save_doc_embed=False,
# path_doc_embed="output/distBert_embedding_REIT-Industrial.npy"
# )
# model_dir = "output/distilBert_REIT-Industrial.sav"
# with open(model_dir, 'wb') as file:
# pickle.dump(model_distilBert, file)
model_dir = "output/distilBert_REIT-Industrial.sav"
with open(model_dir, 'rb') as file:
model_distilBert = pickle.load(file)
topic_words, word_scores, topic_nums = model_distilBert.get_topics()
topic_sizes, topic_nums = model_distilBert.get_topic_sizes()
# pd.DataFrame(topics_scores_df).T  # `topics_scores_df` is not defined in this notebook; commented out to avoid a NameError
topics_top2Vec = pd.DataFrame(topic_words).iloc[:,0:10]
topics_top2Vec["size"] = topic_sizes
topics_top2Vec
```
## Key word / sentence topic loading
```
keyword_embed = model_distilBert.embed(["volunteering"])
res = cosine_similarity(keyword_embed, model_distilBert.topic_vectors)
scores = pd.DataFrame(res, index=["Cosine similiarity"]).T
scores["Topic"] = list(range(0,len(scores)))
scores["Top words"] = scores["Topic"].apply(lambda x: list(topics_top2Vec.iloc[x,0:3]))
scores.sort_values(by="Cosine similiarity", ascending=False, inplace=True)
scores.head(10)
fig = px.bar(scores.iloc[0:10,:], x='Topic', y='Cosine similiarity', text="Top words", title='10 highest topic loadings')
fig.update_layout(xaxis=dict(type='category'),
xaxis_title="Topic number")
fig.show()
```
## Most similar documents for a topic
```
documents, document_scores, document_ids = model_distilBert.search_documents_by_topic(topic_num=5, num_docs=2)
for doc, score, doc_id in zip(documents, document_scores, document_ids):
print(f"Document: {doc_id}, Filename (Company and year): {df.iloc[doc_id,:].filename}, Score: {score}")
print("-----------")
print(doc)
print("-----------")
print()
unique_labels = set(model_distilBert.clustering.labels_)
model_distilBert._create_topic_vectors()
df["topic"] = model_distilBert.clustering.labels_
out = pd.DataFrame(df.groupby(["filename","topic"]).count().iloc[:,0])
out_sorted = (out.iloc[out.index.get_level_values(0) == out.index.get_level_values(0)[0],:].
sort_values(out.columns[0], ascending=False))
out_sorted["topic"] = out_sorted.index.get_level_values(1)
out_sorted["top words"] = out_sorted["topic"].apply(lambda x: list(topics_top2Vec.iloc[x, 0:3]) if x >= 0 else list(["Noise topic"]))
out_sorted
fig = px.bar(out_sorted.head(10), x='topic', y=out.columns[0], text="top words", title='10 highest topic counts')
fig.update_layout(xaxis=dict(type='category'),
xaxis_title="Topic number",
yaxis_title="Count")
fig.show()
model_distilBert._deduplicate_topics()
model_distilBert.topic_vectors.shape
model_distilBert.get_num_topics()
```
## Update model
```
model_distilBert.n_components = 5
model_distilBert.ngram_range = (1,4)
model_distilBert._update_steps(documents=paragraphs, step=1)
topic_words, word_scores, topic_nums = model_distilBert.get_topics()
topic_sizes, topic_nums = model_distilBert.get_topic_sizes()
topics_top2Vec = pd.DataFrame(topic_words).iloc[:,0:10]
topics_top2Vec["size"] = topic_sizes
topics_top2Vec
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
inspector = inspect(engine)
```
# Exploratory Climate Analysis
```
columns = inspector.get_columns('Measurement')
for column in columns:
print(column["name"], column["type"])
columns = inspector.get_columns('Station')
for column in columns:
print(column["name"], column["type"])
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database
LatestDate=np.ravel(session.query(Measurement.date).order_by(Measurement.date.desc()).first())
LatestDate=str(LatestDate).replace("-","").replace("'","").replace("[","").replace("]","")
LatestDate
#Date Calculation Using regex
import re
#Split Year, Month and Date to form a Date time format
CYear=int(re.sub(r'(\d{4})(\d{2})(\d{2})', r'\1', LatestDate))
CMonth=int(re.sub(r'(\d{4})(\d{2})(\d{2})', r'\2', LatestDate))
CDay=int(re.sub(r'(\d{4})(\d{2})(\d{2})', r'\3', LatestDate))
LatestDateFormat = dt.datetime(CYear,CMonth,CDay)
#Subract a year
from dateutil.relativedelta import relativedelta
OneYearAgoDate =(LatestDateFormat) + relativedelta(years=-1)
# Convert Back to queriable pattern
Latest = re.sub(r'(\d{4})(\d{2})(\d{2})', r'\1-\2-\3', LatestDate)
OYear=str(OneYearAgoDate.year)
OMonth=str(OneYearAgoDate.month)
ODay=str(OneYearAgoDate.day)
if len(OMonth) == 1:
OMonth= "0" + OMonth
if len(ODay) == 1:
ODay= "0" + ODay
OneYearAgo = OYear + "-" + OMonth + "-" + ODay
Latest,OneYearAgo
# Perform a query to retrieve the data and precipitation scores
LastYearPreciptitationData=session.query(Measurement.date,Measurement.prcp).filter(Measurement.date >= OneYearAgo).order_by(Measurement.date.desc()).all()
session.query(Measurement.date,Measurement.prcp).filter(Measurement.date >= OneYearAgo).order_by(Measurement.date.desc()).count()
# Save the query results as a Pandas DataFrame and set the index to the date column
LPData=pd.DataFrame()
for L in LastYearPreciptitationData:
df=pd.DataFrame({'Date':[L[0]],"Prcp":[L[1]]})
LPData=LPData.append(df)
# Sort the dataframe by date
LPData=LPData.set_index('Date').sort_values(by="Date",ascending=False)
LPData.head(10)
```

```
# Use Pandas Plotting with Matplotlib to plot the data
LPData.plot(rot=90);
plt.ylim(0,7)
plt.xlabel("Date")
plt.ylabel("Rain (Inches)")
plt.title("Precipitation Analysis")
plt.legend(["Precipitation"])
plt.savefig("./Output/Figure1.png")
plt.show()
# Use Pandas to calculate the summary statistics for the precipitation data
LPData.describe()
```

```
# Design a query to show how many stations are available in this dataset?
# ---- From Measurement Data
session.query(Measurement.station).group_by(Measurement.station).count()
#----From Station Date
session.query(Station).count()
#-- Method 1 -- Using DataFrame
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
Stations=session.query(Measurement.station,Measurement.tobs).all()
station_df=pd.DataFrame()
for s in Stations:
df=pd.DataFrame({"Station":[s.station],"Tobs":[s.tobs]})
station_df=station_df.append(df)
ActiveStation=station_df.Station.value_counts()
ActiveStation
#-- Method 2 -- Using Direct Query
ActiveStationList=session.query(Measurement.station,func.count(Measurement.tobs)).group_by(Measurement.station).order_by(func.count(Measurement.tobs).desc()).all()
ActiveStationList
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
station_df[station_df.Station == 'USC00519281'].Tobs.min(),station_df[station_df.Station == 'USC00519281'].Tobs.max(),station_df[station_df.Station == 'USC00519281'].Tobs.mean()
# Choose the station with the highest number of temperature observations.
print(f"The Station with Highest Number of temperature obervations is {ActiveStationList[0][0]} and the No of Observations are {ActiveStationList[0][1]}")
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
Last12TempO=session.query(Measurement.tobs).filter(Measurement.date > OneYearAgo).filter(Measurement.station==ActiveStationList[0][0]).all()
df=pd.DataFrame(Last12TempO)
plt.hist(df['tobs'],12,color='purple',hatch="/",edgecolor="yellow")
plt.xlabel("Temperature",fontsize=14)
plt.ylabel("Frequency", fontsize=14)
plt.title("One Year Temperature (For Station USC00519281)",fontsize=14)
labels=["Temperature obervation"]
plt.legend(labels)
plt.savefig("./Output/Figure2.png")
plt.show()
```

```
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
#----First Sample
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
TemperatureAverageLast12Months=calc_temps(OneYearAgo, Latest)
print(TemperatureAverageLast12Months)
#----Second Sample
calc_temps('2015-08-21', '2016-08-21')
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
Error = TemperatureAverageLast12Months[0][2]-TemperatureAverageLast12Months[0][0]
AverageTemp = TemperatureAverageLast12Months[0][1]
MinTemp = TemperatureAverageLast12Months[0][0]
MaxTemp = TemperatureAverageLast12Months[0][2]
fig, ax = plt.subplots(figsize=(5,6))
bar_chart = ax.bar(1 , AverageTemp, color= 'salmon', tick_label='',yerr=Error, alpha=0.6)
ax.set_xlabel("Trip")
ax.set_ylabel("Temp (F)")
ax.set_title("Trip Avg Temp")
def autolabels(rects):
    # label each bar with its height (the average temperature)
    for rect in rects:
        h = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2, h + 2, f"{h:.1f}",
                ha='center', va='bottom')
# label the bars
autolabels(bar_chart)
plt.ylim(0, 100)
plt.xlim(0,2)
ax.xaxis.grid()
fig.tight_layout()
plt.savefig("./Output/temperature.png")
plt.show()
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
TripStartTime= '2016-08-21'
TripEndTime = '2016-08-30'
FirstStep = [Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation, func.sum(Measurement.prcp)]
PlaceForTrip = session.query(*FirstStep).\
filter(Measurement.station == Station.station).\
filter(Measurement.date >= TripStartTime).\
filter(Measurement.date <= TripEndTime).\
group_by(Station.name).order_by(func.sum(Measurement.prcp).desc()).all()
print (PlaceForTrip)
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
```
## Optional Challenge Assignment
```
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
normals=[]
# Set the start and end date of the trip
TripStartTime= '2016-08-21'
TripEndTime = '2016-08-30'
# Strip off the year and save a list of %m-%d strings
TripStartTime=TripStartTime.replace("-","")
StartDate=int(re.sub(r'(\d{4})(\d{2})(\d{2})', r'\3', TripStartTime))
TripEndTime=TripEndTime.replace("-","")
EndDate=int(re.sub(r'(\d{4})(\d{2})(\d{2})', r'\3', TripEndTime))
TripMonth=re.sub(r'(\d{4})(\d{2})(\d{2})', r'\2', TripEndTime)
if len(TripMonth) == 1:
TripMonth= "0" + TripMonth
# Use the start and end date to create a range of dates
Dates = [f"{TripMonth}-{num}" for num in range(StartDate, EndDate)]
# Loop through the list of %m-%d strings and calculate the normals for each date
for d in Dates:
Normal = daily_normals(d)
normals.extend(Normal)
normals
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
TempMin = [x[0] for x in normals]
TempAvg = [x[1] for x in normals]
TempMax = [x[2] for x in normals]
SYear=int(re.sub(r'(\d{4})(\d{2})(\d{2})', r'\1', TripStartTime))
TripDatesYear = [f"{SYear}-{d}" for d in Dates]
TripDatesYear
trip_normals = pd.DataFrame({"TempMin":TempMin, "TempAvg":TempAvg, "TempMax":TempMax, "date":TripDatesYear}).set_index("date")
trip_normals.head()
# Plot the daily normals as an area plot with `stacked=False`
trip_normals.plot(kind="area", stacked=False)
plt.legend(loc="right")
plt.ylabel("Temperature (F)")
plt.xticks(range(len(trip_normals.index)), trip_normals.index, rotation="60")
plt.savefig("./Output/daily-normals.png")
plt.show()
# Plot the daily normals as an area plot with `stacked=False`
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import metrics
from sklearn.impute import SimpleImputer
boston = load_boston()
regressor = RandomForestRegressor(n_estimators=100, random_state=0)
cross_val_score(regressor, boston.data, boston.target, cv=10, scoring="neg_mean_squared_error")
sorted(metrics.SCORERS.keys())
```
# Using a random forest to impute missing values
```
dataset = load_boston()
dataset.data.shape
# 506 * 13 = 6,578 values in total
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
```
Add missing values
```
# First decide what proportion of missing data to introduce; here we assume 50%, so 3,289 values in total will be missing
rng = np.random.RandomState(0)
missing_rate = 0.5
n_missing_samples = int(np.floor(n_samples * n_features * missing_rate))
# np.floor rounds down and returns a float ending in .0
# The missing values should be scattered randomly over the rows and columns of the dataset,
# and each missing value needs one row index and one column index.
# If we create an array of 3,289 row indices between 0 and 506 and 3,289 column indices
# between 0 and 13, we can use them to set any 3,289 positions in the data to NaN.
# We then impute these missing values with 0, with the mean, and with a random forest,
# and compare the resulting regression performance.
missing_features = rng.randint(0,n_features,n_missing_samples)
missing_samples = rng.randint(0,n_samples,n_missing_samples)
#missing_samples = rng.choice(dataset.data.shape[0],n_missing_samples,replace=False)
# We are sampling 3,289 indices, far more than our 506 samples, so we use randint
# (sampling with replacement). If we needed fewer indices than samples, we could use
# np.random.choice instead: it samples without replacement, which spreads the missing
# values out and keeps them from concentrating in a few rows.
X_missing = X_full.copy()
y_missing = y_full.copy()
X_missing[missing_samples,missing_features] = np.nan
X_missing = pd.DataFrame(X_missing)
# Converting to a DataFrame makes later operations easier: numpy is extremely fast for matrix computation, but pandas is more convenient for indexing and similar operations
```
Impute with 0 and with the mean
```
# Impute with the mean
from sklearn.impute import SimpleImputer
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
X_missing_mean = imp_mean.fit_transform(X_missing)
# Impute with 0
imp_0 = SimpleImputer(missing_values=np.nan, strategy="constant",fill_value=0)
X_missing_0 = imp_0.fit_transform(X_missing)
```
Impute missing values with a random forest
```
"""
Imputing missing values with random forest regression.

Any regression learns from a feature matrix in order to predict a continuous label y. This works because the
algorithm assumes there is some relationship between the feature matrix and the label. In fact, labels and
features are interchangeable: in a problem where we predict "house price" from "district, environment, number
of nearby schools", we can just as well use "environment", "number of nearby schools" and "house price" to
predict "district". Regression-based imputation exploits exactly this idea.

For a dataset with n features in which feature T has missing values, we treat T as the label and combine the
other n-1 features with the original label into a new feature matrix. The rows where T is not missing have both
features and a label, so they form the training data; the rows where T is missing have only features, and their
T values are exactly what we need to predict.

The other n-1 features plus the original label, for rows where T is not missing: X_train
The non-missing values of T: Y_train
The other n-1 features plus the original label, for rows where T is missing: X_test
The missing values of T (unknown, what we want to predict): Y_test

This approach works particularly well when one feature has many missing values while the other features are
largely complete.

What if features other than T also have missing values? The answer is to loop over all features, starting with
the one that has the fewest missing values (imputing the least-missing feature requires the least accurate
information). While imputing one feature, missing values in the other features are temporarily filled with 0.
After each regression, the predictions are written back into the original feature matrix before imputing the
next feature. Each round leaves one fewer feature with missing values, so fewer and fewer zeros are needed. By
the time we reach the last feature (the one with the most missing values), all other features have already been
imputed with informative values, which we can use to fill it in.

After looping over all the features, the data is complete and no longer has any missing values.
"""
X_missing_reg = X_missing.copy()
# 找出数据集中缺失值最多的从小到大的排序
sortindex = np.argsort(X_missing_reg.isnull().sum(axis=0)).values
for i in sortindex:
    # Build the new feature matrix and the new label
df = X_missing_reg
fillc = df.iloc[:,i]
df = pd.concat([df.iloc[:,df.columns != i],pd.DataFrame(y_full)],axis=1)
    # In the new feature matrix, fill the columns that contain missing values with 0
df_0 =SimpleImputer(missing_values=np.nan,strategy='constant',fill_value=0).fit_transform(df)
    # Build our training and test sets
Ytrain = fillc[fillc.notnull()]
Ytest = fillc[fillc.isnull()]
Xtrain = df_0[Ytrain.index,:]
Xtest = df_0[Ytest.index,:]
    # Use random forest regression to impute the missing values
rfc = RandomForestRegressor(n_estimators=100)
rfc = rfc.fit(Xtrain, Ytrain)
Ypredict = rfc.predict(Xtest)
    # Write the imputed values back into the original feature matrix
X_missing_reg.loc[X_missing_reg.iloc[:,i].isnull(),i] = Ypredict
```
Model the imputed data
```
# Fit a model on every version of the data and record the MSE
X = [X_full,X_missing_mean,X_missing_0,X_missing_reg]
mse = []
std = []
for x in X:
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
scores = cross_val_score(estimator,x,y_full,scoring='neg_mean_squared_error',cv=5).mean()
mse.append(scores * -1)
x_labels = ['Full data',
            'Mean Imputation',
            'Zero Imputation',
            'Regressor Imputation']  # order matches the list X above
colors = ['r', 'g', 'b', 'orange']
plt.figure(figsize=(12, 6))
ax = plt.subplot(111)
for i in np.arange(len(mse)):
ax.barh(i, mse[i],color=colors[i], alpha=0.6, align='center')
ax.set_title('Imputation Techniques with Boston Data')
ax.set_xlim(left=np.min(mse) * 0.9,
right=np.max(mse) * 1.1)
ax.set_yticks(np.arange(len(mse)))
ax.set_xlabel('MSE')
ax.set_yticklabels(x_labels)
plt.show()
```
| github_jupyter |
```
%%html
<link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" />
<link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" />
<style>.subtitle {font-size:medium; display:block}</style>
<link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" />
<link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. -->
<script>
var cell = $(".container .cell").eq(0), ia = cell.find(".input_area")
if (cell.find(".toggle-button").length == 0) {
ia.after(
$('<button class="toggle-button">Toggle hidden code</button>').click(
function (){ ia.toggle() }
)
)
ia.hide()
}
</script>
```
**Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the "Run" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.
$\newcommand{\identity}{\mathrm{id}}
\newcommand{\notdivide}{\nmid}
\newcommand{\notsubset}{\not\subset}
\newcommand{\lcm}{\operatorname{lcm}}
\newcommand{\gf}{\operatorname{GF}}
\newcommand{\inn}{\operatorname{Inn}}
\newcommand{\aut}{\operatorname{Aut}}
\newcommand{\Hom}{\operatorname{Hom}}
\newcommand{\cis}{\operatorname{cis}}
\newcommand{\chr}{\operatorname{char}}
\newcommand{\Null}{\operatorname{Null}}
\newcommand{\lt}{<}
\newcommand{\gt}{>}
\newcommand{\amp}{&}
$
<div class="mathbook-content"><h2 class="heading hide-type" alt="Exercises 10.5 Sage Exercises"><span class="type">Section</span><span class="codenumber">10.5</span><span class="title">Sage Exercises</span></h2><a href="normal-sage-exercises.ipynb" class="permalink">¶</a></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-381"><h6 class="heading"><span class="codenumber">1</span></h6><p id="p-1698">Build every subgroup of the alternating group on 5 symbols, $A_5\text{,}$ and check that each is not a normal subgroup (except for the two trivial cases). This command might take a couple seconds to run. Compare this with the time needed to run the <code class="code-inline tex2jax_ignore">.is_simple()</code> method and realize that there is a significant amount of theory and cleverness brought to bear in speeding up commands like this. (It is possible that your Sage installation lacks <abbr class="acronym">GAP</abbr>'s “Table of Marks” library and you will be unable to compute the list of subgroups.)</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-382"><h6 class="heading"><span class="codenumber">2</span></h6><p id="p-1699">Consider the quotient group of the group of symmetries of an $8$-gon, formed with the cyclic subgroup of order $4$ generated by a quarter-turn. Use the <code class="code-inline tex2jax_ignore">coset_product</code> function to determine the Cayley table for this quotient group. Use the number of each coset, as produced by the <code class="code-inline tex2jax_ignore">.cosets()</code> method as names for the elements of the quotient group. You will need to build the table “by hand” as there is no easy way to have Sage's Cayley table command do this one for you. You can build a table in the Sage Notebook pop-up editor (shift-click on a blue line) or you might read the documentation of the <code class="code-inline tex2jax_ignore">html.table()</code> method.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-383"><h6 class="heading"><span class="codenumber">3</span></h6><p id="p-1700">Consider the cyclic subgroup of order $4$ in the symmetries of an $8$-gon. Verify that the subgroup is normal by first building the raw left and right cosets (without using the <code class="code-inline tex2jax_ignore">.cosets()</code> method) and then checking their equality in Sage, all with a single command that employs sorting with the <code class="code-inline tex2jax_ignore">sorted()</code> command.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-384"><h6 class="heading"><span class="codenumber">4</span></h6><p id="p-1701">Again, use the same cyclic subgroup of order $4$ in the group of symmetries of an $8$-gon. Check that the subgroup is normal by using part (2) of Theorem <a href="section-factor-groups.ipynb#theorem-normal-equivalents" class="xref" alt="Theorem 10.3 " title="Theorem 10.3 ">10.3</a>. Construct a one-line command that does the complete check and returns <code class="code-inline tex2jax_ignore">True</code>. Maybe sort the elements of the subgroup <code class="code-inline tex2jax_ignore">S</code> first, then slowly build up the necessary lists, commands, and conditions in steps. Notice that this check does not require ever building the cosets.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-385"><h6 class="heading"><span class="codenumber">5</span></h6><p id="p-1702">Repeat the demonstration from the previous subsection that for the symmetries of a tetrahedron, a cyclic subgroup of order $3$ results in an undefined coset multiplication. Above, the default setting for the <code class="code-inline tex2jax_ignore">.cosets()</code> method builds right cosets — but in this problem, work instead with left cosets. You need to choose two cosets to multiply, and then demonstrate two choices for representatives that lead to different results for the product of the cosets.</p></article></div>
<div class="mathbook-content"><article class="exercise-like" id="exercise-386"><h6 class="heading"><span class="codenumber">6</span></h6><p id="p-1703">Construct some dihedral groups of order $2n$ (i.e. symmetries of an $n$-gon, $D_{n}$ in the text, <code class="code-inline tex2jax_ignore">DihedralGroup(n)</code> in Sage). Maybe all of them for $3\leq n \leq 100\text{.}$ For each dihedral group, construct a list of the orders of each of the normal subgroups (so use <code class="code-inline tex2jax_ignore">.normal_subgroups()</code>). You may need to wait ten or twenty seconds for this to finish - be patient. Observe enough examples to hypothesize a pattern to your observations, check your hypothesis against each of your examples and then state your hypothesis clearly.</p><p id="p-1704">Can you predict how many normal subgroups there are in the dihedral group $D_{470448}$ without using Sage to build all the normal subgroups? Can you <em class="emphasis">describe</em> all of the normal subgroups of a dihedral group in a way that would let us predict all of the normal subgroups of $D_{470448}$ without using Sage?</p></article></div>
| github_jupyter |
```
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib import rcParams
import numpy as np
%matplotlib inline
rcParams['font.sans-serif'] = 'arial'
pal = sns.xkcd_palette(['dark sky blue', 'light sky blue', 'deep red']).as_hex()
imprinting_df = pd.read_csv('../data/imprinting_function_birth_year.csv')
pop_df = pd.read_csv('../data/demography_by_birth_year.csv')
profiles = pd.read_csv('../final_results_for_ms/15-100/DAHVcohort_subtype.profile_liks.csv', index_col='param')
imprinting_df = imprinting_df[imprinting_df.Season==2018]
pop_df = pop_df[pop_df.Season==2018]
def make_pie_scatter(X, Y, r1, r2, ax, colors, size=200, edgecolor='#666666'):
x = [0] + np.cos(np.linspace(0, 2 * np.pi * r1, 1000)).tolist()
y = [0] + np.sin(np.linspace(0, 2 * np.pi * r1, 1000)).tolist()
xy1 = np.column_stack([x, y])
s1 = np.abs(xy1).max()
x = [0] + np.cos(np.linspace(2 * np.pi * r1, 2 * np.pi * r2, 1000)).tolist()
y = [0] + np.sin(np.linspace(2 * np.pi * r1, 2 * np.pi * r2, 1000)).tolist()
xy2 = np.column_stack([x, y])
s2 = np.abs(xy2).max()
x = [0] + np.cos(np.linspace(2 * np.pi * r2, 2 * np.pi, 1000)).tolist()
y = [0] + np.sin(np.linspace(2 * np.pi * r2, 2 * np.pi, 1000)).tolist()
xy3 = np.column_stack([x, y])
s3 = np.abs(xy3).max()
ax.scatter([X], [Y], marker=(xy1),
s=size, facecolor=colors[0],
edgecolor=edgecolor)
ax.scatter([X], [Y], marker=(xy2),
s=size, facecolor=colors[1],
edgecolor=edgecolor)
ax.scatter([X], [Y], marker=(xy3),
s=size, facecolor=colors[2],
edgecolor=edgecolor)
def get_imprinting_probs(cohort_label):
min_birth_year, max_birth_year = cohort_label.split('-')
min_birth_year = int(min_birth_year)
max_birth_year = int(max_birth_year)
m = imprinting_df[(imprinting_df.Birth_year >= min_birth_year) &
(imprinting_df.Birth_year <= max_birth_year)].sort_values('Birth_year')
p = pop_df[(pop_df.Birth_year >= min_birth_year) &
(pop_df.Birth_year <= max_birth_year)].sort_values('Birth_year')
weights = np.array(p.Population / p.sum().Population)
h1 = sum(m['H1'] * weights)
h2 = sum(m['H2'] * weights)
h3 = sum(m['H3'] * weights)
return(h1, h2, h3)
x = []
y = []
ax0 = plt.subplot(111)
ax0.plot([0, 1], [0, 1], '--', color='#cccccc', zorder=0)
flip = ['1968-1977']
for param, row in profiles.iterrows():
if type(param) == str:
if 'h1' in param and 'VE' in param:
label = param.split('_')[1].replace('.','-')
if label == '2003-2007':
label = '2003-2006'
if label == '1917-1952':
label = '1918-1952'
h1, h2, h3 = get_imprinting_probs(label)
print(h1,h2,h3,label)
row2 = profiles.loc[param.replace('h1', 'h3'), ]
if row.mle != 0.5:
#y.append(row.mle)
#x.append(row2.mle)
if label in flip:
ax0.text(row2.mle - 0.02, row.mle -0.03, label, va='center', ha='right', size=9)
else:
ax0.text(row2.mle + 0.02, row.mle -0.03, label, va='center', size=9)
# errorbars
ax0.hlines(row.mle, row2.prof_min, row2.prof_max, linestyle='-', color='#aaaaaa', zorder=0)
ax0.vlines(row2.mle, row.prof_min, row.prof_max, linestyle='-', color='#aaaaaa', zorder=0)
make_pie_scatter(row2.mle, row.mle, h1, h1+h2, ax0, pal)
#ax0.plot(x, y, 'o', markeredgecolor='purple', color='white')
#ax0.set_ylim(0, 1.05)
l_h1, = plt.plot([100, 100], [100, 100], 's', color=pal[0], markersize=10, label='H1N1', markeredgecolor='k')
l_h2, = plt.plot([100, 100], [100, 100], 's', color=pal[1], markersize=10, label='H2N2', markeredgecolor='k')
l_h3, = plt.plot([100, 100], [100, 100], 's', color=pal[2], markersize=10, label='H3N2', markeredgecolor='k')
plt.legend((l_h1, l_h2, l_h3), ('H1N1', 'H2N2', 'H3N2'), ncol=3, loc='upper center', bbox_to_anchor=(0.5, -0.2), title='Imprinting subtype')
plt.xticks(np.arange(0, 1.1, 0.1), range(0,110,10))
plt.yticks(np.arange(0, 1.1, 0.1), range(0,110,10))
ax0.set_xlim(-0.05, 1.05)
ax0.set_ylim(-0.01, 1.05)
ax0.set_xlabel('Cohort-specific VE for H3N2 (%)', weight='bold')
ax0.set_ylabel('Cohort-specific VE for H1N1 (%)', weight='bold')
plt.gcf().set_size_inches(4,4)
import glob
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import ListedColormap
import numpy as np
%matplotlib inline
df = pd.read_csv('../final_results_for_ms/15-100/result_summary.csv', index_col='Unnamed: 0')
pal = sns.color_palette('colorblind').as_hex()
flatui = ['white', pal[3], 'darkgreen', 'lightgreen']
my_cmap = ListedColormap(sns.color_palette(flatui).as_hex())
rcParams['font.sans-serif'] = 'arial'
sns.set_context('paper')
full_model='DAHNV'
final_df = pd.DataFrame(columns=['D', 'E', 'Ap', 'vac_cov', 'Nu', 'A', 'N2', 'H_sub','H_group', 'V_constant', 'V_age', 'V_season', 'V_imprinting', 'V_cohort', 'cAIC'])
row = 0
df = df.iloc[1:, ]
exclude = ['DAHVage_subtype', 'DAHVcohort_subtype', 'DAHNVageseason_subtype', 'DAHNVageseason_group',
'DAHNVcohortseason_subtype', 'DAHNVcohortseason_group', 'DAVage', 'DAVcohort', 'DAVimprinting', 'DAVseason']
for model, r in df.iterrows():
if model not in exclude:
if 'Vage' in model:
V = 'V_age'
final_df.loc[row, V] = 1
elif 'Vseason' in model:
V = 'V_season'
final_df.loc[row, V] = 1
elif 'Vimprinting' in model:
V = 'V_imprinting'
final_df.loc[row, V] = 1
elif 'Vcohort' in model:
V = 'V_cohort'
final_df.loc[row, V] = 1
elif 'Vmean' in model:
V = 'V_constant'
final_df.loc[row, V] = 1
if 'H' in model:
if 'subtype' in model:
final_df.loc[row, 'H_sub'] = 1
elif 'group' in model:
final_df.loc[row, 'H_group'] = 1
if 'N' in model:
if r['N2m'] != 0:
final_df.loc[row, 'N2'] = 0.5
else:
final_df.loc[row, 'N2'] = 0.5
final_df.loc[row, 'A'] = 1
final_df.loc[row, 'D'] = 0.25
final_df.loc[row, 'E'] = 0.25
final_df.loc[row, 'Ap'] = 0.25
final_df.loc[row, 'vac_cov'] = 0.25
final_df.loc[row, 'Nu'] = 0.25
#final_df.loc[row, '']
final_df.loc[row, 'cAIC'] = r.cAIC
row += 1
final_df = final_df.sort_values('cAIC')
final_df = final_df.fillna(0)
#final_df['cAIC'] = [np.exp(-0.5 * (c - min(final_df['cAIC']))) for c in final_df['cAIC']]
#final_df.index = ["%.4f" % (c/sum(final_df.cAIC)) for c in final_df['cAIC']]
final_df.index = ["%.4f" % (c - min(final_df['cAIC'])) for c in final_df['cAIC']]
final_df = final_df.loc[:, final_df.columns != 'cAIC']
final_df.columns = ['Demography',
'Enrollment fraction',
'Approachment fraction',
'Healthcare-seeking behavior among vaccinated',
'Nursing home residency',
'Age-specific risk of medically attended influenza A infection',
'N2 imprinting',
'HA imprinting (subtype)',
'HA imprinting (group)',
'Vaccine effectiveness (constant)',
'Vaccine effectiveness (age-specific)',
'Vaccine effectiveness (season-specific)',
'Vaccine effectiveness (imprinting-specific)',
'Vaccine effectiveness (cohort-specific)']
sns.heatmap(final_df, cmap=my_cmap, linewidths=1, linecolor='black', cbar=False, yticklabels=1)
ax = plt.gca()
ax.xaxis.tick_top()
plt.yticks(rotation=0, fontsize=10)
plt.xticks(rotation=45, ha='left', weight='bold')
plt.ylabel('Δ cAIC', weight='bold')
f = plt.gcf()
f.set_size_inches(5.5, 5.5)
plt.tight_layout()
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import rcParams, patches
import seaborn as sns
import numpy as np
%matplotlib inline
H1_cohort_expectations = '../final_results_for_ms/15-100/DAHVcohort_subtype_H1_expectations.csv'
H1_age_expectations = '../final_results_for_ms/15-100/DAHVage_subtype_H1_expectations.csv'
H3_cohort_expectations = '../final_results_for_ms/15-100/DAHVcohort_subtype_H3_expectations.csv'
H3_age_expectations = '../final_results_for_ms/15-100/DAHVage_subtype_H3_expectations.csv'
def get_labels(age_classes):
labels = []
for l in age_classes:
if l == '65-100':
labels.append('65+')
else:
labels.append(l.replace('v',''))
return labels
def season_to_label(season):
if season == 2009.5:
label = '2009Pan'
else:
label = str(int(season) - 1) + '-' + str(int(season))
return label
rcParams['font.sans-serif'] = 'arial'
rcParams['font.size'] = 10
rcParams['font.weight'] = 'medium'
pal = sns.xkcd_palette(['dark sky blue', 'sky', 'deep red', 'baby pink']).as_hex()
h3_seasons = [2008, 2011, 2012, 2013, 2015, 2017, 2018]
kwargs={'linewidth': 1,
'zorder': 10,
'color': '#8a8a8a'}
subplot = 1
excess = []
resid_co = 0
resid_ag = 0
for season in range(2008, 2019):
if season not in h3_seasons and season != 2009.5:
df_cohort = pd.read_csv(H1_cohort_expectations, index_col=0)
df_age = pd.read_csv(H1_age_expectations, index_col=0)
df_cohort = df_cohort[df_cohort.vac_status == 'vaccinated']
df_age = df_age[df_age.vac_status == 'vaccinated']
plt.subplot(4,3,subplot)
codf = df_cohort[df_cohort.season==season].copy()
agdf = df_age[df_age.season==season].copy()
final_df = pd.merge(codf, agdf, suffixes=['_co', '_ag'], on=['age_group', 'Observed'])
final_df['Excess_co'] = final_df.Observed - final_df.Prediction_co
final_df['Excess_ag'] = final_df.Observed - final_df.Prediction_ag
new_rows = []
x = []
y1 = []
y2 = []
x1 = []
x2 = []
start = -0.2
for index, row in final_df.iterrows():
new_rows.append([row.age_group, row.Excess_co, 'Cohort VE'])
new_rows.append([row.age_group, row.Excess_ag, 'Age VE'])
x.append(start)
x.append(start + 0.4)
y2.append(row.ci_high_co - row.Prediction_co)
y2.append(row.ci_high_ag - row.Prediction_ag)
y1.append(row.ci_low_co - row.Prediction_co)
y1.append(row.ci_low_ag - row.Prediction_ag)
x1.append(start-0.1)
x2.append(start+0.1)
x1.append(start + 0.4 - 0.1)
x2.append(start + 0.4 + 0.1)
start += 1
plotdf = pd.DataFrame(new_rows, columns = ['Age group', 'Excess cases', 'VE type'])
plt.vlines(x=x, ymin=y1, ymax=y2, **kwargs)
plt.hlines(y=y1, xmin = x1, xmax=x2, **kwargs)
plt.hlines(y=y2, xmin = x1, xmax=x2, **kwargs)
ax = sns.barplot(data=plotdf, x='Age group', y='Excess cases', hue='VE type', palette=pal[0:2],edgecolor='#333333')
ax.legend_.remove()
check1 = final_df[(final_df.Observed < final_df.ci_low_ag) | (final_df.Observed > final_df.ci_high_ag)].copy()
check2 = final_df[(final_df.Observed < final_df.ci_low_co) | (final_df.Observed > final_df.ci_high_co)].copy()
elif season != 2009.5:
df_cohort = pd.read_csv(H3_cohort_expectations, index_col=0)
df_age = pd.read_csv(H3_age_expectations, index_col=0)
df_cohort = df_cohort[df_cohort.vac_status == 'vaccinated']
df_age = df_age[df_age.vac_status == 'vaccinated']
plt.subplot(4,3,subplot)
codf = df_cohort[df_cohort.season==season].copy()
agdf = df_age[df_age.season==season].copy()
final_df = pd.merge(codf, agdf, suffixes=['_co', '_ag'], on=['age_group', 'Observed'])
final_df['Excess_co'] = final_df.Observed - final_df.Prediction_co
final_df['Excess_ag'] = final_df.Observed - final_df.Prediction_ag
new_rows = []
x = []
x1 = []
x2 = []
y1 = []
y2 = []
start = -0.2
for index, row in final_df.iterrows():
new_rows.append([row.age_group, row.Excess_co, 'Cohort VE'])
new_rows.append([row.age_group, row.Excess_ag, 'Age VE'])
x.append(start)
x.append(start + 0.4)
y2.append(row.ci_high_co - row.Prediction_co)
y2.append(row.ci_high_ag - row.Prediction_ag)
y1.append(row.ci_low_co - row.Prediction_co)
y1.append(row.ci_low_ag - row.Prediction_ag)
x1.append(start-0.1)
x2.append(start+0.1)
x1.append(start + 0.4 - 0.1)
x2.append(start + 0.4 + 0.1)
start += 1
plotdf = pd.DataFrame(new_rows, columns = ['Age group', 'Excess cases', 'VE type'])
plt.vlines(x=x, ymin=y1, ymax=y2, **kwargs)
plt.hlines(y=y1, xmin = x1, xmax=x2, **kwargs)
plt.hlines(y=y2, xmin = x1, xmax=x2, **kwargs)
ax = sns.barplot(data=plotdf, x='Age group', y='Excess cases', hue='VE type', palette=pal[2:],edgecolor='#333333')
ax.legend_.remove()
check1 = final_df[(final_df.Observed < final_df.ci_low_ag) | (final_df.Observed > final_df.ci_high_ag)].copy()
check2 = final_df[(final_df.Observed < final_df.ci_low_co) | (final_df.Observed > final_df.ci_high_co)].copy()
plt.title(str(season - 1) + '-' + str(season), weight='bold')
plt.axhline(0, color='black', linewidth=1)
ticks, labels = plt.xticks()
if subplot not in [1,4,7,10]:
plt.ylabel('')
else:
        plt.ylabel('Excess cases\namong vaccinated\nindividuals', weight='bold')
if subplot not in [9, 10, 11]:
plt.xlabel('')
plt.xticks(ticks, [])
else:
plt.xlabel('Age group\n(years)', weight='bold')
plt.xticks(ticks, labels, rotation=45, ha='right')
plt.gcf().align_ylabels()
subplot += 1
xmin, xmax = plt.xlim()
plt.gcf().set_size_inches(5.5, 7)
plt.tight_layout()
b1, = plt.bar([10], [0], color=pal[0], edgecolor='#333333', label='H1N1 unvaccinated')
b2, = plt.bar([10], [0], color=pal[1], edgecolor='#333333', label='H1N1 vaccinated')
b3, = plt.bar([10], [0], color=pal[2], edgecolor='#333333', label='H3N2 unvaccinated')
b4, = plt.bar([10], [0], color=pal[3], edgecolor='#333333', label='H3N2 vaccinated')
plt.legend((b1, b2,b3,b4),
('H1N1 cohort VE model',
'H1N1 age VE model',
'H3N2 cohort VE model',
'H3N2 age VE model'),
loc='center',
bbox_to_anchor=(0.5, -1.6),
ncol=2)
plt.xlim(xmin, xmax)
```
# Gradient-boosting decision tree (GBDT)
In this notebook, we will present the gradient boosting decision tree
algorithm and contrast it with AdaBoost.
Gradient boosting differs from AdaBoost in the following way: instead of
assigning weights to specific samples, GBDT fits each new decision tree on
the residual errors (hence the name "gradient") of the previous tree.
Therefore, each new tree in the ensemble predicts the error made by the
previous learner instead of predicting the target directly.
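Schematically (and ignoring the shrinkage/learning-rate factor that
implementations usually apply), the ensemble is built as
$$
F_0(x) = h_0(x), \qquad F_m(x) = F_{m-1}(x) + h_m(x),
$$
where each new tree $h_m$ is fitted on the residuals
$r_i = y_i - F_{m-1}(x_i)$ of the current ensemble.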
In this section, we will provide some intuition about the way learners are
combined to give the final prediction. In this regard, let's go back to our
regression problem which is more intuitive for demonstrating the underlying
machinery.
```
import pandas as pd
import numpy as np
# Create a random number generator that will be used to set the randomness
rng = np.random.RandomState(0)
def generate_data(n_samples=50):
"""Generate synthetic dataset. Returns `data_train`, `data_test`,
`target_train`."""
x_max, x_min = 1.4, -1.4
len_x = x_max - x_min
x = rng.rand(n_samples) * len_x - len_x / 2
noise = rng.randn(n_samples) * 0.3
y = x ** 3 - 0.5 * x ** 2 + noise
data_train = pd.DataFrame(x, columns=["Feature"])
data_test = pd.DataFrame(np.linspace(x_max, x_min, num=300),
columns=["Feature"])
target_train = pd.Series(y, name="Target")
return data_train, data_test, target_train
data_train, data_test, target_train = generate_data()
import matplotlib.pyplot as plt
import seaborn as sns
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
_ = plt.title("Synthetic regression dataset")
```
As we previously discussed, boosting will be based on assembling a sequence
of learners. We will start by creating a decision tree regressor. We will set
the depth of the tree so that the resulting learner will underfit the data.
```
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3, random_state=0)
tree.fit(data_train, target_train)
target_train_predicted = tree.predict(data_train)
target_test_predicted = tree.predict(data_test)
```
Using the term "test" here refers to data that was not used for training.
It should not be confused with data coming from a train-test split, as it
was generated in equally-spaced intervals for the visual evaluation of the
predictions.
```
# plot the data
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
# plot the predictions
line_predictions = plt.plot(data_test["Feature"], target_test_predicted, "--")
# plot the residuals
for value, true, predicted in zip(data_train["Feature"],
target_train,
target_train_predicted):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
plt.legend([line_predictions[0], lines_residuals[0]],
["Fitted tree", "Residuals"])
_ = plt.title("Prediction function together \nwith errors on the training set")
```
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Tip</p>
<p class="last">In the cell above, we manually edited the legend to get only a single label
for all the residual lines.</p>
</div>
Since the tree underfits the data, its accuracy is far from perfect on the
training data. We can observe this in the figure by looking at the difference
between the predictions and the ground-truth data. We represent these errors,
called "Residuals", by unbroken red lines.
Indeed, our initial tree was not expressive enough to handle the complexity
of the data, as shown by the residuals. In a gradient-boosting algorithm, the
idea is to create a second tree which, given the same data `data`, will try
to predict the residuals instead of the vector `target`. We would therefore
have a tree that is able to predict the errors made by the initial tree.
Let's train such a tree.
```
residuals = target_train - target_train_predicted
tree_residuals = DecisionTreeRegressor(max_depth=5, random_state=0)
tree_residuals.fit(data_train, residuals)
target_train_predicted_residuals = tree_residuals.predict(data_train)
target_test_predicted_residuals = tree_residuals.predict(data_test)
sns.scatterplot(x=data_train["Feature"], y=residuals, color="black", alpha=0.5)
line_predictions = plt.plot(
data_test["Feature"], target_test_predicted_residuals, "--")
# plot the residuals of the predicted residuals
for value, true, predicted in zip(data_train["Feature"],
residuals,
target_train_predicted_residuals):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
plt.legend([line_predictions[0], lines_residuals[0]],
["Fitted tree", "Residuals"], bbox_to_anchor=(1.05, 0.8),
loc="upper left")
_ = plt.title("Prediction of the previous residuals")
```
We see that this new tree only manages to fit some of the residuals. We will
focus on a specific sample from the training set (i.e. we know that the
sample will be well predicted using two successive trees). We will use this
sample to explain how the predictions of both trees are combined. Let's first
select this sample in `data_train`.
```
sample = data_train.iloc[[-2]]
x_sample = sample['Feature'].iloc[0]
target_true = target_train.iloc[-2]
target_true_residual = residuals.iloc[-2]
```
Let's plot the previous information and highlight our sample of interest.
Let's start by plotting the original data and the prediction of the first
decision tree.
```
# Plot the previous information:
# * the dataset
# * the predictions
# * the residuals
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
plt.plot(data_test["Feature"], target_test_predicted, "--")
for value, true, predicted in zip(data_train["Feature"],
target_train,
target_train_predicted):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
# Highlight the sample of interest
plt.scatter(sample, target_true, label="Sample of interest",
color="tab:orange", s=200)
plt.xlim([-1, 0])
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
_ = plt.title("Tree predictions")
```
Now, let's plot the residuals information. We will plot the residuals
computed from the first decision tree and show the residual predictions.
```
# Plot the previous information:
# * the residuals committed by the first tree
# * the residual predictions
# * the residuals of the residual predictions
sns.scatterplot(x=data_train["Feature"], y=residuals,
color="black", alpha=0.5)
plt.plot(data_test["Feature"], target_test_predicted_residuals, "--")
for value, true, predicted in zip(data_train["Feature"],
residuals,
target_train_predicted_residuals):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
# Highlight the sample of interest
plt.scatter(sample, target_true_residual, label="Sample of interest",
color="tab:orange", s=200)
plt.xlim([-1, 0])
plt.legend()
_ = plt.title("Prediction of the residuals")
```
For our sample of interest, our initial tree is making an error (small
residual). When fitting the second tree, the residual in this case is
perfectly fitted and predicted. We will quantitatively check this prediction
using the fitted tree. First, let's check the prediction of the initial tree
and compare it with the true value.
```
print(f"True value to predict for "
f"f(x={x_sample:.3f}) = {target_true:.3f}")
y_pred_first_tree = tree.predict(sample)[0]
print(f"Prediction of the first decision tree for x={x_sample:.3f}: "
f"y={y_pred_first_tree:.3f}")
print(f"Error of the tree: {target_true - y_pred_first_tree:.3f}")
```
As we visually observed, we have a small error. Now, we can use the second
tree to try to predict this residual.
```
print(f"Prediction of the residual for x={x_sample:.3f}: "
f"{tree_residuals.predict(sample)[0]:.3f}")
```
We see that our second tree is capable of predicting the exact residual
(error) of our first tree. Therefore, we can predict the target value for `x`
by summing the predictions of all the trees in the ensemble.
```
y_pred_first_and_second_tree = (
y_pred_first_tree + tree_residuals.predict(sample)[0]
)
print(f"Prediction of the first and second decision trees combined for "
f"x={x_sample:.3f}: y={y_pred_first_and_second_tree:.3f}")
print(f"Error of the tree: {target_true - y_pred_first_and_second_tree:.3f}")
```
We chose a sample for which only two trees were enough to make the perfect
prediction. However, we saw in the previous plot that two trees were not
enough to correct the residuals of all samples. Therefore, one needs to
add several trees to the ensemble to successfully correct the error
(i.e. the second tree corrects the first tree's error, while the third tree
corrects the second tree's error and so on).
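As a minimal sketch of this iterative procedure (leaving out the
shrinkage/learning rate that `GradientBoostingRegressor` applies in practice),
we could keep fitting new trees on the current residuals and sum their
predictions:
```
from sklearn.tree import DecisionTreeRegressor

# Minimal sketch: repeatedly fit a shallow tree on the current residuals
# and accumulate the predictions (no learning rate / shrinkage here).
n_trees = 10
trees = []
residuals = target_train.copy()
for _ in range(n_trees):
    tree_ = DecisionTreeRegressor(max_depth=3, random_state=0)
    tree_.fit(data_train, residuals)
    trees.append(tree_)
    # update the residuals with what this tree could not explain
    residuals = residuals - tree_.predict(data_train)

# The ensemble prediction is the sum of the predictions of all the trees
ensemble_prediction = sum(tree_.predict(data_test) for tree_ in trees)
```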
We will compare the generalization performance of random-forest and gradient
boosting on the California housing dataset.
```
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import cross_validate
data, target = fetch_california_housing(return_X_y=True, as_frame=True)
target *= 100 # rescale the target in k$
from sklearn.ensemble import GradientBoostingRegressor
gradient_boosting = GradientBoostingRegressor(n_estimators=200)
cv_results_gbdt = cross_validate(
gradient_boosting, data, target, scoring="neg_mean_absolute_error",
n_jobs=2,
)
print("Gradient Boosting Decision Tree")
print(f"Mean absolute error via cross-validation: "
f"{-cv_results_gbdt['test_score'].mean():.3f} +/- "
f"{cv_results_gbdt['test_score'].std():.3f} k$")
print(f"Average fit time: "
f"{cv_results_gbdt['fit_time'].mean():.3f} seconds")
print(f"Average score time: "
f"{cv_results_gbdt['score_time'].mean():.3f} seconds")
from sklearn.ensemble import RandomForestRegressor
random_forest = RandomForestRegressor(n_estimators=200, n_jobs=2)
cv_results_rf = cross_validate(
random_forest, data, target, scoring="neg_mean_absolute_error",
n_jobs=2,
)
print("Random Forest")
print(f"Mean absolute error via cross-validation: "
f"{-cv_results_rf['test_score'].mean():.3f} +/- "
f"{cv_results_rf['test_score'].std():.3f} k$")
print(f"Average fit time: "
f"{cv_results_rf['fit_time'].mean():.3f} seconds")
print(f"Average score time: "
f"{cv_results_rf['score_time'].mean():.3f} seconds")
```
In terms of computational performance, the forest can be parallelized and will
benefit from using multiple cores of the CPU. In terms of scoring
performance, both algorithms lead to very close results.
However, we see that gradient boosting is very fast at prediction compared to
the random forest. This is because gradient boosting uses shallow trees. We
will go into detail in the next notebook about the hyperparameters to consider
when optimizing ensemble methods.
# Introduction
## 1.1 Some Apparently Simple Questions
## 1.2 An Alternative Analytic Framework
In this framework, problems are solved to a high degree of accuracy using numerical methods.
```
!pip install --user quantecon
import numpy as np
import numpy.linalg as la
from numba import *
from __future__ import division
#from quantecon.quad import qnwnorm
```
Suppose now that the economist is presented with a demand function
$$q = 0.5\,p^{-0.2} + 0.5\,p^{-0.5},$$
one that is the sum of a domestic demand term and an export demand term.
Suppose that the economist is asked to find the price that clears the
market of, say, a quantity of 2 units.
```
#%pylab inline
%pylab notebook
# pylab Populating the interactive namespace from numpy and matplotlib
# numpy for numerical computation
# matplotlib for ploting
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
p = np.linspace(0.01,0.5, 100)
q = .5 * p **-.2 + .5 * p ** -.5 - 2
plot(q,p)
x1,x2,y1,y2 = 2, 2, 0, 0.5
plot((x1, x2), (y1, y2), 'k-')
# example 1.2
p = 0.25
for i in range(100):
deltap = (.5 * p **-.2 + .5 * p ** -.5 - 2)/(.1 * p **-1.2 + .25 * p **-1.5)
p = p + deltap
if abs(deltap) < 1.e-8: # accuracy
break
#https://stackoverflow.com/questions/20457038/python-how-to-round-down-to-2-decimals
print('The market clearing price is {:0.2f} '.format(p))
```
Consider now the rational expectations commodity market model with government
intervention. The source of difficulty in solving this problem is the need to
evaluate the truncated expectation of a continuous distribution.
The economist would replace the original normal yield distribution
with a discrete distribution that has identical lower moments, say one that assumes
values $y_1, y_2, \ldots, y_n$ with probabilities $w_1, w_2, \ldots, w_n$.
```
# https://github.com/QuantEcon/QuantEcon.py/blob/master/quantecon/quad.py
def qnwnorm(n, mu=None, sig2=None, usesqrtm=False):
"""
Computes nodes and weights for multivariate normal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
Based of original function ``qnwnorm`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
n = np.asarray(n)
d = n.size
if mu is None:
mu = np.zeros((d,1))
else:
mu = np.asarray(mu).reshape(-1, 1)
if sig2 is None:
sig2 = np.eye(d)
else:
sig2 = np.asarray(sig2).reshape(d, d)
if all([x.size == 1 for x in [n, mu, sig2]]):
nodes, weights = _qnwnorm1(n)
else:
nodes = []
weights = []
for i in range(d):
_1d = _qnwnorm1(n[i])
nodes.append(_1d[0])
weights.append(_1d[1])
nodes = gridmake(*nodes)
weights = ckron(*weights[::-1])
if usesqrtm:
new_sig2 = la.sqrtm(sig2)
else: # cholesky
new_sig2 = la.cholesky(sig2)
if d > 1:
nodes = new_sig2.dot(nodes) + mu # Broadcast ok
else: # nodes.dot(sig) will not be aligned in scalar case.
nodes = nodes * new_sig2 + mu
return nodes.squeeze(), weights
def _qnwnorm1(n):
"""
Compute nodes and weights for quadrature of univariate standard
normal distribution
Parameters
----------
n : int
The number of nodes
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
nodes : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
Based of original function ``qnwnorm1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
maxit = 100
pim4 = 1 / np.pi**(0.25)
m = np.fix((n + 1) / 2).astype(int)
nodes = np.zeros(n)
weights = np.zeros(n)
for i in range(m):
if i == 0:
z = np.sqrt(2*n+1) - 1.85575 * ((2 * n + 1)**(-1 / 6.1))
elif i == 1:
z = z - 1.14 * (n ** 0.426) / z
elif i == 2:
z = 1.86 * z + 0.86 * nodes[0]
elif i == 3:
z = 1.91 * z + 0.91 * nodes[1]
else:
z = 2 * z + nodes[i-2]
its = 0
while its < maxit:
its += 1
p1 = pim4
p2 = 0
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = z * math.sqrt(2.0/j) * p2 - math.sqrt((j - 1.0) / j) * p3
pp = math.sqrt(2 * n) * p2
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-14:
break
if its == maxit:
raise ValueError("Failed to converge in _qnwnorm1")
nodes[n - 1 - i] = z
nodes[i] = -z
weights[i] = 2 / (pp*pp)
weights[n - 1 - i] = weights[i]
weights /= math.sqrt(math.pi)
nodes = nodes * math.sqrt(2.0)
return nodes, weights
# example 1.2
y, w = qnwnorm(10, 1, 0.1)
a = 1
for it in range(100):
aold = a
p = 3 - 2 * a * y
f = w.dot(np.maximum(p, 1))
a = 0.5 + 0.5 * f
if abs(a - aold) < 1.e-8:
break
print('The rational expectations equilibrium acreage is {:0.2f} '.format(a) )
print('The expected market price is {:0.2f} '.format(np.dot(w, p)) )
print('The expected effective producer price is {:0.2f} '.format(f) )
```
The economist has combined Gaussian quadrature techniques and fixed-point function iteration methods to solve the problem.
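In symbols, the fixed-point iteration implemented above (with quadrature nodes $y_i$ and weights $w_i$) repeats
$$
p_i = 3 - 2 a y_i, \qquad
f = \sum_i w_i \max(p_i, 1), \qquad
a \leftarrow \tfrac{1}{2} + \tfrac{1}{2} f
$$
until $a$ stops changing (here, until the update is smaller than $10^{-8}$).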
## Linear Algebra
These exercises will involve vector and matrix math using the <a href="http://wiki.scipy.org/Tentative_NumPy_Tutorial">NumPy</a> Python package.
This exercise will be divided into two parts:
#### 1. Math checkup
Where you will do some of the math by hand.
#### 2. NumPy and Spark linear algebra
You will do some exercises using the NumPy package.
<br>
In the following exercises you will need to replace the code parts in the cell that starts with the following comment: "#Replace the `<INSERT>`"
To go through the notebook, fill in the `<INSERT>` placeholders with appropriate code in the cells.
To run a cell, press Shift-Enter to run it and advance to the following cell, or Ctrl-Enter to only run the code in the cell. You should do the exercises in this notebook from top to bottom, because later cells may depend on code in previous cells.
If you want to execute these lines in a python script, you will need to create first a spark context:
```
#from pyspark import SparkContext, StorageLevel \
#from pyspark.sql import SQLContext \
#sc = SparkContext(master="local[*]") \
#sqlContext = SQLContext(sc) \
```
But since we are using the notebooks, those lines are not needed here.
## 1. Math checkup
### 1.1 Euclidian norm
$$
\mathbf{v} = \begin{bmatrix}
666 \\
1337 \\
1789 \\
1066 \\
1945 \\
\end{bmatrix}
\qquad
\|\mathbf{v}\| = ?
$$
Calculate the euclidian norm for the $\mathbf{v}$ using the following definition:
$$
\|\mathbf{v}\|_2 = \sqrt{\sum\limits_{i=1}^n {x_i}^2} = \sqrt{{x_1}^2+\cdots+{x_n}^2}
$$
```
#Replace the <INSERT>
import math
import numpy as np
v = [666, 1337, 1789, 1066, 1945]
rdd = sc.parallelize(v)
#sumOfSquares = rdd.map(<INSERT>).reduce(<INSERT>)
sumOfSquares = rdd.map(lambda x: x*x ).reduce(lambda x,y : x+y)
norm = math.sqrt(sumOfSquares)
# <INSERT round to 8 decimals >
norm = format(norm, '.8f')
norm_numpy= np.linalg.norm(v)
print("norm: "+str(norm) +" norm_numpy: "+ str(norm_numpy))
#Helper function to check results
import hashlib
def hashCheck(x, hashCompare): #Defining a help function
hash = hashlib.md5(str(x).encode('utf-8')).hexdigest()
print(hash)
if hash == hashCompare:
print('Yay, you succeeded!')
else:
print('Try again!')
def check(x,y,label):
if(x == y):
print("Yay, "+label+" is correct!")
else:
print("Nay, "+label+" is incorrect, please try again!")
def checkArray(x,y,label):
if np.allclose(x,y):
print("Yay, "+label+" is correct!")
else:
print("Nay, "+label+" is incorrect, please try again!")
#Check if the norm is correct
hashCheck(norm_numpy, '6de149ccbc081f9da04a0bbd8fe05d8c')
```
### 1.2 Transpose
$$
\mathbf{A} = \begin{bmatrix}
1 & 2 & 3\\
4 & 5 & 6\\
7 & 8 & 9\\
\end{bmatrix}
\qquad
\mathbf{A}^T = ?
$$
Tranpose is an operation on matrices that swaps the row for the columns.
$$
\begin{bmatrix}
2 & 7 \\
3 & 11\\
5 & 13\\
\end{bmatrix}^T
\Rightarrow
\begin{bmatrix}
2 & 3 & 5 \\
7 & 11 & 13\\
\end{bmatrix}
$$
Do the transpose of A by hand and write it in:
```
#Replace the <INSERT>
#Input aT like this: AT = [[1, 2, 3],[4, 5, 6],[7, 8, 9]]
#At = <INSERT>
A= np.matrix([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
print(A)
print("\n")
At = np.matrix.transpose(A)
print (At)
At =[[1,4, 7],[2, 5, 8],[3, 6, 9]]
print("\n")
print (At)
#Check if the transpose is correct
hashCheck(At, '1c8dc4c2349277cbe5b7c7118989d8a5')
```
### 1.3 Scalar matrix multiplication
$$
\mathbf{A} = 3\times\begin{bmatrix}
1 & 2 & 3\\
4 & 5 & 6\\
7 & 8 & 9\\
\end{bmatrix}
=?
\qquad
\mathbf{B} = 5\times\begin{bmatrix}
1\\
-4\\
7\\
\end{bmatrix}
=?
$$
The operation is done element-wise, e.g. $k\times\mathbf{A}=\mathbf{C}$ then $k\times a_{i,j}={k}c_{i,j}$.
$$
2
\times
\begin{bmatrix}
1 & 6 \\
4 & 8 \\
\end{bmatrix}
=
\begin{bmatrix}
2\times1& 2\times6 \\
2\times4 & 2\times8\\
\end{bmatrix}
=
\begin{bmatrix}
2& 12 \\
8 & 16\\
\end{bmatrix}
$$
$$
11
\times
\begin{bmatrix}
2 \\
3 \\
5 \\
\end{bmatrix}
=
\begin{bmatrix}
11\times2 \\
11\times3 \\
11\times5 \\
\end{bmatrix}
=
\begin{bmatrix}
22\\
33\\
55\\
\end{bmatrix}
$$
Do the scalar multiplications of $\mathbf{A}$ and $\mathbf{B}$ by hand and write them in:
```
#Replace the <INSERT>
#Input A like this: A = [[1, 2, 3],[4, 5, 6],[7, 8, 9]]
#And B like this: B = [1, -4, 7]
#A = <INSERT>
#B = <INSERT>
A = np.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
print(3*A)
print ("\n")
B = np.array([1, -4, 7])
print (5*B)
print ("\n")
A = [[ 3, 6, 9], [12, 15,18], [21, 24, 27]]
B = [5, -20, 35]
#Check if the scalar matrix multiplication is correct
hashCheck(A, '91b9508ec9099ee4d2c0a6309b0d69de')
hashCheck(B, '88bddc0ee0eab409cee011770363d007')
```
### 1.4 Dot product
$$
c_1=\begin{bmatrix}
11 \\
2 \\
\end{bmatrix}
\cdot
\begin{bmatrix}
3 \\
5 \\
\end{bmatrix}
=?
\qquad
c_2=\begin{bmatrix}
1 \\
2 \\
3 \\
\end{bmatrix}
\cdot
\begin{bmatrix}
4 \\
5 \\
6 \\
\end{bmatrix}
=?
$$
The operations are done element-wise, e.g. $\mathbf{v}\cdot\mathbf{w}=k$ then $\sum v_i \times w_i =k$
$$
\begin{bmatrix}
2 \\
3 \\
5 \\
\end{bmatrix}
\cdot
\begin{bmatrix}
1 \\
4 \\
6 \\
\end{bmatrix}
= 2\times1+3\times4+5\times6=44
$$
Calculate the values of $c_1$ and $c_2$ by hand and write them in:
```
#Replace the <INSERT>
#Input c1 and c2 like this: c = 1337
#c1 = <INSERT>
#c2 = <INSERT>
c1_1 = np.array([11,2])
c1_2 = np.array([3,5])
c1 = c1_1.dot(c1_2)
print (c1)
c1 = 43
c2_1 = np.array([1,2,3])
c2_2 = np.array([4,5,6])
c2 = c2_1.dot(c2_2)
print (c2)
c2 = 32
#Check if the dot product is correct
hashCheck(c1, '17e62166fc8586dfa4d1bc0e1742c08b')
hashCheck(c2, '6364d3f0f495b6ab9dcf8d3b5c6e0b01')
```
### 1.5 Matrix multiplication
$$
\mathbf{A}=
\begin{bmatrix}
682 & 848 & 794 & 954 \\
700 & 1223 & 1185 & 816 \\
942 & 428 & 324 & 526 \\
321 & 543 & 532 & 614 \\
\end{bmatrix}
\qquad
\mathbf{B}=
\begin{bmatrix}
869 & 1269 & 1306 & 358 \\
1008 & 836 & 690 & 366 \\
973 & 619 & 407 & 1149 \\
323 & 42 & 405 & 117 \\
\end{bmatrix}
\qquad
\mathbf{A}\times\mathbf{B}=\mathbf{C}=?
$$
The $c_{i,j}$ entry is the dot product of the i-th row in $\mathbf{A}$ and the j-th column in $\mathbf{B}$
Calculate $\mathbf{C}$ by implementing the naive matrix multiplication algotrithm with $\mathcal{O}(n^3)$ run time, by using the tree nested for-loops below:
```
# The convention is to import NumPy as the alias np
import numpy as np
A = [[ 682, 848, 794, 954],
[ 700, 1223, 1185, 816],
[ 942, 428, 324, 526],
[ 321, 543, 532, 614]]
B = [[ 869, 1269, 1306, 358],
[1008, 836, 690, 366],
[ 973, 619, 407, 1149],
[ 323, 42, 405, 117]]
C = [[0]*4 for i in range(4)]
#Iterate through rows of A
for i in range(len(A)):
#Iterate through columns of B
for j in range(len(B[0])):
#Iterate through rows of B
for k in range(len(B)):
C[i][j] += A[i][k] * B[k][j]
print(np.matrix(C))
print(np.matrix(A)*np.matrix(B))
#Check if the matrix multiplication is correct
hashCheck(C, 'f6b7b0500a6355e8e283f732ec28fa76')
```
## 2. NumPy and Spark linear algebra
A python library to utilize arrays is <a href="http://wiki.scipy.org/Tentative_NumPy_Tutorial">NumPy</a>. The library is optimized to be fast and memory efficient, and provide abstractions corresponding to vectors, matrices and the operations done on these objects.
Numpy's array class is called <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html">ndarray</a>, it is also known by the alias array. This is a multidimensional array of fixed-size that contains numerical elements of one type, e.g. floats or integers.
### 2.1 Scalar matrix multiplication using NumPy
$$
\mathbf{A} = \begin{bmatrix}
1 & 2 & 3\\
4 & 5 & 6\\
7 & 8 & 9\\
\end{bmatrix}
\quad
5\times\mathbf{A}=\mathbf{C}=?
\qquad
\mathbf{B} = \begin{bmatrix}
1&-4& 7\\
\end{bmatrix}
\quad
3\times\mathbf{B}=\mathbf{D}=?
$$
Utilizing the <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html">np.array()</a> function, create the above matrix $\mathbf{A}$ and vector $\mathbf{B}$ and multiply them by 5 and 3 respectively.
Note that if you use a Python list of integers to create an array you will get a one-dimensional array, which is, for our purposes, equivalent to a vector.
Calculate C and D by inputting the following statements:
```
#Replace the <INSERT>. You will use np.array()
A = np.array([[1, 2, 3],[4,5,6],[7,8,9]])
B = np.array([1,-4, 7])
C = A *5
D = 3 * B
print(A)
print(B)
print(C)
print(D)
#Check if the scalar matrix multiplication is correct
checkArray(C,[[5, 10, 15],[20, 25, 30],[35, 40, 45]], "the scalar multiplication")
checkArray(D,[3, -12, 21], "the scalar multiplication")
```
### 2.2 Dot product and element-wise multiplication
Both the dot product and element-wise multiplication are supported by ndarrays.
Element-wise multiplication between two arrays of the same dimension is done with the standard operator *.
For the dot product you can use either <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html#numpy.dot">np.dot()</a> or <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.dot.html">np.array.dot()</a>. The dot product is a commutative operation, i.e. the order of the arrays does not matter, e.g. if you have the ndarrays x and y, you can write the dot product in any of the following four ways: np.dot(x, y), np.dot(y, x), x.dot(y), or y.dot(x).
Calculate the element wise product and the dot product by filling in the following statements:
```
#Replace the <INSERT>
u = np.arange(0, 5)
v = np.arange(5, 10)
elementWise = np.multiply(u,v)
dotProduct = np.dot(u,v)
print(elementWise)
print(dotProduct)
#Check if the dot product and element wise is correct
checkArray(elementWise,[0,6,14,24,36], "the element wise multiplication")
check(dotProduct, 80, "the dot product")
```
### 2.3 Cosine similarity
The cosine similarity between two vectors is defined as the following equation:
$$
cosine\_similarity(u,v)=\cos\theta=\frac{\mathbf{u}\cdot\mathbf{v}}{\|u\|\|v\|}
$$
The norm of a vector $\|v\|$ can be calculated by using <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html#numpy.linalg.norm">np.linalg.norm()</a>.
Implement the following function that calculates the cosine similarity:
```
def cosine_similarity(u,v):
dotProduct = np.dot(u,v)
normProduct = np.linalg.norm(u)*np.linalg.norm(v)
return dotProduct/normProduct
u = np.array([2503,2992,1042])
v = np.array([2217,2761,990])
w = np.array([0,1,1])
x = np.array([1,0,1])
uv = cosine_similarity(u,v)
wx = cosine_similarity(w,x)
print(uv)
print(wx)
#Check if the cosine similarity is correct
check(round(uv,5),0.99974,"cosine similarity between u and v")
check(round(wx,5),0.5,"cosine similarity between w and x")
```
### 2.4 Matrix math
To represent matrices, you can use the following class: <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.matrix.html">np.matrix()</a>. To create a matrix object, pass the function either a two-dimensional ndarray, a list of lists, or a string, e.g. '1 2; 3 4'. For matrix objects, instead of element-wise multiplication, the operator * does matrix multiplication.
To transpose a matrix, you can use either <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.matrix.transpose.html">np.matrix.transpose()</a> or <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.matrix.T.html">.T</a> on the matrix object.
To calculate the inverse of a matrix, you can use <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.inv.html">np.linalg.inv()</a> or <a href="docs.scipy.org/doc/numpy/reference/generated/numpy.matrix.I.htmll">.I</a> on the matrix object. Remember that the inverse is only defined for square matrices and does not always exist (for sufficient conditions of invertibility, look up <a href="https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem">the invertible matrix theorem</a>); if the inverse does not exist, a LinAlgError is raised. If you multiply the original matrix with its inverse, you get the identity matrix, which is a square matrix with ones on the main diagonal and zeros elsewhere, e.g. $\mathbf{A} \mathbf{A}^{-1} = \mathbf{I_n}$
In the following exercise, you should calculate $\mathbf{A}^T$ multiply it by $\mathbf{A}$ and then inverting the product $\mathbf{AA}^T$ and finally multiply $\mathbf{AA}^T[\mathbf{AA}^T]^{-1}=\mathbf{I}_n$ to get the identity matrix:
```
#Replace the <INSERT>
#We generate a Vandermonde matrix
A = np.mat(np.vander([2,3], 5))
print(A)
#Calculate the transpose of A
At = np.transpose(A)
print(At)
#Calculate the multiplication of A and A^T
AAt = np.dot(A,At)
print(AAt)
#Calculate the inverse of AA^T
AAtInv = np.linalg.inv(AAt)
print(AAtInv)
#Calculate the multiplication of AA^T and (AA^T)^-1
I = np.dot(AAt,AAtInv)
print(I)
#To get the identity matrix we round it because of numerical precision
I = I.round(13)
#Check if the matrix math is correct
checkArray(I,[[1.,0.], [0.,1.]], "the matrix math")
```
### 2.5 Slices
It is possible to select subsets of one-dimensional arrays using <a href="http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html">slices</a>. The basic syntax for slices is $\mathbf{v}$[i:j:k] where i is the starting index, j is the stopping index, and k is the step ($k\neq0$), the default value for k, if it is not specified, is 1. If no i is specified, the default value is 0, and if no j is specified, the default value is the end of the array.
For example [0,1,2,3,4][:3] = [0,1,2] i.e. the three first elements of the array. You can use negative indices also, for example [0,1,2,3,4][-3:] = [2,3,4] i.e. the three last elements.
The following function can be used to concatenate 2 or more arrays: <a href="http://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html">np.concatenate</a>, the syntax is np.concatenate((a1, a2, ...)).
Slice the following array into 3 pieces and concatenate them together to form the original array:
```
#Replace the <INSERT>
v = np.arange(1, 9)
print(v)
#The first two elements of v
v1 = v[:2]
#The last two elements of v
v3 = v[-2:]
#The middle four elements of v
v2 = v[2:6]
print(v1)
print(v2)
print(v3)
#Concatenating the three vectors to get the original array
u = np.concatenate((v1, v2, v3))
```
### 2.6 Stacking
There exist many functions provided by the NumPy library to <a href="http://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html">manipulate</a> existing arrays. We will try out two of these methods: <a href="docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html">np.hstack()</a>, which takes two or more arrays and stacks them horizontally to make a single array (column wise, equivalent to np.concatenate), and <a href="docs.scipy.org/doc/numpy/reference/generated/numpy.vstack.html">np.vstack()</a>, which takes two or more arrays and stacks them vertically (row wise). The syntax is the following: np.vstack((a1, a2, ...)).
Stack the two following arrays $\mathbf{u}$ and $\mathbf{v}$ to create a 1x20 and a 2x10 array:
```
#Replace the <INSERT>
u = np.arange(1, 11)
v = np.arange(11, 21)
#A 1x20 array
oneRow = np.hstack((u,v))
print(oneRow)
#A 2x10 array
twoRows = np.vstack((u,v))
print(twoRows)
#Check if the stacks are correct
checkArray(oneRow,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], "the hstack")
checkArray(twoRows,[[1,2,3,4,5,6,7,8,9,10],[11,12,13,14,15,16,17,18,19,20]], "the vstack")
```
### 2.7 PySpark's DenseVector
In PySpark there exists a <a href="https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.linalg.DenseVector">DenseVector</a> class within the module <a href="https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#module-pyspark.mllib.linalg">pyspark.mllib.linalg</a>. The DenseVector stores the values as a NumPy array and delegates the calculations to this object. You can create a new DenseVector by using DenseVector() and passing it a NumPy array or a Python list.
The DenseVector class implements several functions, one important is the dot product, DenseVector.dot(), which operates just like np.ndarray.dot().
The DenseVector saves all values as np.float64, so even if you pass it an integer vector, the resulting vector will contain floats. Using the DenseVector in a distributed setting can be done by either passing functions that contain them to resilient distributed dataset (RDD) transformations or by distributing them directly as RDDs; a short sketch of this is shown after the exercise below.
Create the DenseVector $\mathbf{u}$ containing the 10 elements [0.1,0.2,...,1.0] and the DenseVector $\mathbf{v}$ containing the 10 elements [1.0,2.0,...,10.0] and calculate the dot product of $\mathbf{u}$ and $\mathbf{v}$:
```
#To use the DenseVector first import it
from pyspark.mllib.linalg import DenseVector
#Replace the <INSERT>
#[0.1,0.2,...,1.0]
u = DenseVector((0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1))
print(u)
#[1.0,2.0,...,10.0]
v = DenseVector((1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0))
print(v)
#The dot product between u and v
dotProduct = np.dot(u,v)
#Check if the dense vectors are correct
check(dotProduct, 38.5, "the dense vectors")
```
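As a sketch of the distributed usage mentioned above (assuming the SparkContext `sc` from the introduction is available), DenseVectors can be distributed as an RDD and used inside transformations:
```
#A minimal sketch; assumes the SparkContext `sc` mentioned in the introduction exists
from pyspark.mllib.linalg import DenseVector

w = DenseVector([1.0, 2.0, 3.0])
#Distribute a list of DenseVectors as an RDD
vectorRDD = sc.parallelize([DenseVector([1.0, 0.0, 0.0]),
                            DenseVector([0.0, 1.0, 0.0]),
                            DenseVector([1.0, 1.0, 1.0])])
#Compute each vector's dot product with w inside a transformation
dotProducts = vectorRDD.map(lambda v: v.dot(w)).collect()
print(dotProducts)
```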
# Metadata Organization
## Imports
```
import pandas as pd
import numpy as np
import os.path
import glob
import pathlib
import functools
import time
import re
import gc
from nilearn.input_data import NiftiMasker
import nibabel as nib
from nilearn import image
from joblib import Parallel, delayed
```
## Load configs (all patterns/files/folderpaths)
```
import configurations
configs = configurations.Config('sub-xxx-resamp-intersected')
```
## Timer decorator used to profile the functions below
```
def timer(func):
"""Print the runtime of the decorated function"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
print(f'Calling {func.__name__!r}')
startTime = time.perf_counter()
value = func(*args, **kwargs)
endTime = time.perf_counter()
runTime = endTime - startTime
print(f'Finished {func.__name__!r} in {runTime:.4f} secs')
return value
return wrapper
```
## Function to find file paths matching a pattern
```
@timer
def find_paths(relDataFolder, subj, sess, func, patt):
paths = list(pathlib.Path(relDataFolder).glob(
os.path.join(subj, sess, func, patt)
)
)
return paths
```
## Find all the regressor file paths
```
regressor_paths = find_paths(relDataFolder=configs.dataDir,
subj='sub-*',
sess='ses-*',
func='func',
patt=configs.confoundsFilePattern)
regressor_paths
```
## Find all the BOLD NII file paths
```
nii_paths = find_paths(relDataFolder=configs.dataDir,
subj='sub-*',
sess='ses-*',
func='func',
patt=configs.maskedImagePattern)
nii_paths
```
## Read the participants.tsv file to find summaries of the subjects
```
participant_info_df = pd.read_csv(
configs.participantsSummaryFile,
sep='\t'
)
participant_info_df
```
## Get a mapping Dataframe of subject and which session is the sleep deprived one
```
@timer
def map_sleepdep(participant_info):
df = pd.DataFrame(participant_info.loc[:,['participant_id', 'Sl_cond']])
df.replace('sub-', '', inplace=True, regex=True)
return df.rename(columns={'participant_id':'subject', 'Sl_cond':'sleepdep_session'})
sleepdep_map = map_sleepdep(participant_info_df)
sleepdep_map
```
## Get Dataframe of subject, session, task, path
```
@timer
def get_bids_components(paths):
components_list = []
for i, path in enumerate(paths):
filename = path.stem
dirpath = path.parents[0]
matches = re.search(
'[a-z0-9]+\-([a-z0-9]+)_[a-z0-9]+\-([a-z0-9]+)_[a-z0-9]+\-([a-z0-9]+)',
filename
)
subject = matches.group(1)
session = matches.group(2)
task = matches.group(3)
confound_file = path.with_name(
'sub-'+subject+'_ses-'+session+'_task-'+task+'_desc-confounds_regressors.tsv'
)
components_list.append([subject, session, task,
path.__str__(), confound_file.__str__(), 0]
)
df = pd.DataFrame(components_list,
columns=['subject', 'session', 'task', 'path', 'confound_path', 'sleepdep']
)
return df
bids_comp_df = get_bids_components(nii_paths)
bids_comp_df
```
## Combine logically sleepdep_map and components_df into 1 dataframe
```
sleep_bids_comb_df = bids_comp_df.merge(sleepdep_map, how='left')
```
## Response column 'sleepdep' imputed from 'session' 'sleepdep_session'
```
for i in range(len(sleep_bids_comb_df)):
    if (int(sleep_bids_comb_df['session'].iloc[i]) ==
        int(sleep_bids_comb_df['sleepdep_session'].iloc[i])):
        # Use .loc instead of chained indexing so the assignment writes back reliably
        sleep_bids_comb_df.loc[i, 'sleepdep'] = 1
sleep_bids_comb_df
```
## Get confounds that can be used to further clean up the signal or for prediction
```
def get_important_confounds(regressor_paths, important_reg_list, start, end):
regressors_df_list = []
for paths in regressor_paths:
regressors_all = pd.DataFrame(pd.read_csv(paths, sep="\t"))
regressors_selected = pd.DataFrame(regressors_all[important_reg_list].loc[start:end-1])
regressors_df_list.append(pd.DataFrame(regressors_selected.stack(0)).transpose())
concatenated_df = pd.concat(regressors_df_list, ignore_index=True)
concatenated_df.columns = [col[1] + '-' + str(col[0]) for col in concatenated_df.columns.values]
return concatenated_df
important_reg_list = ['csf', 'white_matter', 'global_signal',
'trans_x', 'trans_y', 'trans_z',
'rot_x', 'rot_y', 'rot_z',
'csf_derivative1', 'white_matter_derivative1', 'global_signal_derivative1',
'trans_x_derivative1', 'trans_y_derivative1', 'trans_z_derivative1',
'rot_x_derivative1', 'rot_y_derivative1', 'rot_z_derivative1',
'csf_power2', 'white_matter_power2', 'global_signal_power2',
'trans_x_power2', 'trans_y_power2', 'trans_z_power2',
'rot_x_power2', 'rot_y_power2', 'rot_z_power2',
'csf_derivative1_power2', 'white_matter_derivative1_power2', 'global_signal_derivative1_power2',
'trans_x_derivative1_power2', 'trans_y_derivative1_power2', 'trans_z_derivative1_power2',
'rot_x_derivative1_power2', 'rot_y_derivative1_power2', 'rot_z_derivative1_power2'
]
important_confounds_df = get_important_confounds(
sleep_bids_comb_df['confound_path'], important_reg_list, configs.startSlice, configs.endSlice
)
```
## Load the masker data file to prepare to apply to images
```
masker = NiftiMasker(mask_img=configs.maskDataFile, standardize=False)
```
## Helper to generate raw voxel df from a given path + masker and print shape for sanity
```
@timer
def gen_one_voxel_df(filepath, masker, start, end):
masked_array = masker.fit_transform(image.index_img(filepath, slice(start,end)))
reshaped_array = pd.DataFrame(np.reshape(
masked_array.ravel(), newshape=[1,-1]), dtype='float32')
print('> Shape of raw voxels for file ' +
'\"' + pathlib.Path(filepath).stem + '\" ' +
'is: \n' +
'\t 1-D (UnMasked+Sliced): ' + str(reshaped_array.shape) + '\n' +
'\t 2-D (UnMasked+Sliced): ' + str(masked_array.shape) + '\n' +
'\t 4-D (Raw header) : ' + str(nib.load(filepath).header.get_data_shape())
)
return reshaped_array
```
## Function to generate from masked image the raw voxel df from all images in folder
```
@timer
def get_voxels_df(metadata_df, masker, start, end):
rawvoxels_list = []
print() # Print to add a spacer for aesthetics
#below has been parallelized
for i in range(len(metadata_df)):
rawvoxels_list.append(gen_one_voxel_df(metadata_df['path'].iloc[i], masker, start, end))
print() # Print to add a spacer for aesthetics
# rawvoxels_list.append(Parallel(n_jobs=-1, verbose=100)(delayed(gen_one_voxel_df)(metadata_df['path'].iloc[i], masker, start, end) for i in range(len(metadata_df))))
print() # Print to add a spacer for aesthetics
tmp_df = pd.concat(rawvoxels_list, ignore_index=True)
tmp_df['sleepdep'] = metadata_df['sleepdep']
temp_dict = dict((val, str(val)) for val in list(range(len(tmp_df.columns)-1)))
return tmp_df.rename(columns=temp_dict, errors='raise')
```
## Garbage collect
```
gc.collect()
```
## Get/Generate raw voxels dataframe from all images with Y column label included
```
voxels_df = get_voxels_df(sleep_bids_comb_df, masker, configs.startSlice, configs.endSlice)
X = pd.concat([voxels_df, important_confounds_df], axis=1)
```
## Separately get the Y label
```
Y = sleep_bids_comb_df['sleepdep']
```
## Save raw dataframe with Y column included to a file
```
X.to_pickle(configs.rawVoxelFile)
```
```
import numpy as np
%matplotlib notebook
import matplotlib.pyplot as plt
nu = np.linspace(1e9, 200e9)
ElectronCharge = 4.803e-10
ElectronMass = 9.1094e-28
SpeedLight = 3e10
def plot_ql_approx(magField, thetaDeg, plasmaDens, ax=None):
gyroFreq = ElectronCharge * magField / (2 * np.pi * ElectronMass * SpeedLight)
plasmaFreq = ElectronCharge * np.sqrt(plasmaDens / (np.pi * ElectronMass))
theta = np.deg2rad(thetaDeg)
approx = (nu**2 - plasmaFreq**2) / (nu * gyroFreq)
limit = 0.5 * np.sin(theta)**2 / np.abs(np.cos(theta))
if ax == None:
plt.figure()
plt.semilogx(nu, approx, label='approximation')
plt.axhline(limit, color='r', label='limit')
plt.semilogx(nu, approx / limit, label='ratio')
plt.legend()
plt.xlabel('Frequency [Hz]')
plt.title(r'Validity of QL approximation for B=%.1f G,''\n'r'$\theta=$%.1f$\degree$ and $n_p$=%.1e cm$^{-3}$' % (magField, thetaDeg, plasmaDens))
else:
ax.semilogx(nu, approx, label='approximation')
ax.axhline(limit, color='r', label='limit')
ax.semilogx(nu, approx / limit, label='ratio')
ax.set_xlabel('Frequency [Hz]')
ax.set_title(r'Validity of QL approximation for B=%.1f G,''\n'r'$\theta=$%.1f$\degree$ and $n_p$=%.1e cm$^{-3}$' % (magField, thetaDeg, plasmaDens))
fig, ax = plt.subplots(2, 2, figsize=(10,10))
plas = 1.51e11
plot_ql_approx(2000, 10, plas, ax=ax[0,0])
ax[0,0].set_title(r'$\theta=10\degree$')
plot_ql_approx(2000, 30, plas, ax=ax[0,1])
ax[0,1].set_title(r'$\theta=30\degree$')
plot_ql_approx(2000, 60, plas, ax=ax[1,0])
ax[1,0].set_title(r'$\theta=60\degree$')
plot_ql_approx(2000, 85, plas, ax=ax[1,1])
ax[1,1].set_title(r'$\theta=85\degree$')
lines = ax[0,0].get_lines()
fig.legend(lines, [l.get_label() for l in lines])
fig.suptitle('Validity of QL approximation: B=2000G, $n_p=1.51\cdot10^{11}$ cm$^{-3}$\n'r'$\tau=1$ for 200 GHz')
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
fig, ax = plt.subplots(2, 2, figsize=(10,10))
plas = 5.3e10
plot_ql_approx(1300, 10, plas, ax=ax[0,0])
ax[0,0].set_title(r'$\theta=10\degree$')
plot_ql_approx(1300, 30, plas, ax=ax[0,1])
ax[0,1].set_title(r'$\theta=30\degree$')
plot_ql_approx(1300, 60, plas, ax=ax[1,0])
ax[1,0].set_title(r'$\theta=60\degree$')
plot_ql_approx(1300, 85, plas, ax=ax[1,1])
ax[1,1].set_title(r'$\theta=85\degree$')
lines = ax[0,0].get_lines()
fig.legend(lines, [l.get_label() for l in lines])
fig.suptitle('Validity of QL approximation: B=1300G, $n_p=5.3\cdot10^{10}$ cm$^{-3}$\n'r'$\tau=1$ for 45 GHz')
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
c7 = np.genfromtxt('c7_adj.csv', delimiter=',', skip_header=1)
height = c7[:,0]
temperature = c7[:,1]
plasmaDens = c7[:,2]
protonDens = c7[:,4]
def dulk_k(freq):
lowT = 17.9 + np.log(temperature**1.5) - np.log(freq)
highT = 24.5 + np.log(temperature) - np.log(freq)
t1 = np.where(temperature < 2e5, lowT, highT)
kDulk = 9.78e-3 * plasmaDens * protonDens / freq**2 / temperature**1.5 * t1;
return kDulk
plt.figure()
plt.semilogy(height, dulk_k(10e9))
plt.xlabel('Height [cm]')
plt.ylabel('$\kappa_{ff}$ [cm$^{-1}$]')
def tau_eq_line(tauVal, freq):
ds = height[1:] - height[:-1]
tau = np.cumsum(dulk_k(freq)[-1:0:-1] * ds[::-1])
return height[-(1 + np.argmax(tau > tauVal))]
tauVal = 1
tauLine = []
for n in nu:
tauLine.append(tau_eq_line(tauVal, n))
fig, ax = plt.subplots(2, 1)
ax[0].semilogx(nu, tauLine, label=r'$tau$-line')
ax[0].set_xlabel('Frequency [Hz]')
ax[0].set_ylabel(r'$\tau=1$ altitude [cm]')
ax[0].set_title(r'Height of $\tau=1$ line for C7 model ''\n''(for an observer looking down from the corona)')
dens = ax[1].twinx()
ax[1].semilogy(height, temperature, 'g', label='T')
ax[1].set_xlim(-5e7, 4e8)
dens.semilogy(height, plasmaDens, 'r', label='$n_p$')
dens.set_ylabel('Plasma density [cm$^{-3}$]')
ax[1].set_ylabel('Temperature [K]')
ax[1].set_xlabel('Height [cm]')
ax[1].axvline(tauLine[-1])
ax[1].axvline(tauLine[np.searchsorted(nu, 10e9)-1])
fig.legend()
fig.tight_layout()
```
# GIS web services
## Web Map Service / Web Coverage Service
A Web Map Service (WMS) is an Open Geospatial Consortium (OGC) standard that allows users to remotely access georeferenced map images via secure hypertext transfer protocol (HTTPS) requests.
DE Africa provides two types of maps services:
* Web Map Service (WMS) – A standard protocol for serving georeferenced map images over the internet that are generated from a map server using data from a GIS database. It is important to note that with a WMS, you are essentially getting an image of geospatial data (i.e. JPG, GIF, PNG file). While this has its uses, it is an image only, and therefore does not contain any of the underlying geospatial data that was used to create the image.
* Web Coverage Service (WCS) – A standard protocol for serving coverage data which returns data with its original semantics (instead of just pictures) which may be interpreted, extrapolated, etc., and not just portrayed. Essentially, a WCS can be thought of as the raw geospatial raster data behind an image. Using a WCS, you can pull the raw raster information you need to perform further analysis.
So, to summarise quickly, a WMS is simply an image of a map. You can almost think of this like taking a screenshot of Google Maps. A WCS is the raw raster data, so for example, if you are working with a WCS containing Landsat imagery, you can effectively chunk off the piece you are interested in and download the full multispectral image at the spatial resolution of the original image. The beauty of these services is that you can grab only the information you need. So, rather than retrieving a file that contains the data you are seeking and possibly much more, you can confine your download to only your area of interest, allowing you to get what you need and no more.
For more information, see this article on the [difference between GIS web services](https://www.l3harrisgeospatial.com/Learn/Blogs/Blog-Details/ArtMID/10198/ArticleID/16289/Web-Mapping-Service-Web-Coverage-Service-or-Web-Feature-Service-%E2%80%93-What%E2%80%99s-the-Difference).
The tutorials below cover setting up WMS and connecting to WCS.
## Tutorial: Setting up WMS
This tutorial shows how to set up the Web Map Service in QGIS and use it with other data on your computer, such as drone imagery, vector or raster data. This may be useful if you cannot upload the data to the DE Africa Map or the DE Africa Sandbox because of its size or your internet bandwidth. It may also be useful if you feel more comfortable doing analysis in a GIS application.
Although this tutorial focuses on QGIS, the same process can be used to connect other Desktop GIS applications. [QGIS](https://qgis.org/en/site/) is a free and open-source desktop GIS application. You can download it from https://qgis.org/en/site/.
**How to connect to WMS using QGIS**
1. Launch QGIS.
2. On the Menu Bar click on **Layer**.
3. A sub-menu tab will show below Layer; click on **Add Layer**, choose **Add WMS/WMTS Layer**.
<img align="middle" src="_static/other_information/ows_tutorial_1.png" alt="QGIS - Add Layer" width="500">
4. A dialogue will open as shown below. Click on the **New** button.
<img align="middle" src="_static/other_information/ows_tutorial_2.png" alt="QGIS - New Layer" width="500">
5. A dialogue will open, as shown below: Provide the following details, these can be found at the URL https://ows.digitalearth.africa/.
`Name: DE Africa Services`
`URL: https://ows.digitalearth.africa/wms?version=1.3.0 `
<img align="middle" src="_static/other_information/ows_tutorial_3.png" alt="QGIS - Create New Connection" width="300">
6. After providing the details above, click on **OK**.
7. The previous dialogue will show up, in the dropdown above the **New** button, you will see DE Africa Services. If it is not there click the dropdown button below and select it.
8. The **Connect** button will be activated, click on it to load the layers. Anytime this page is open, because the connection has already been established, click on **Connect** to load the data.
<img align="middle" src="_static/other_information/ows_tutorial_4.png" alt="QGIS - View Connection" width="500">
9. The layer will be loaded as shown below in the dialogue.
10. Navigate through layers and choose the layer you will need to display on the Map Page.
11. After selecting the layer, click on **Add** button at the bottom of the dialogue.
12. Close the dialogue, the selected layer will be loaded onto the Map Page.
**For web developers**
The sites below provide instructions on how to load these map services onto your platform.
https://leafletjs.com/examples/wms/wms.html
https://openlayers.org/en/latest/examples/wms-tiled.html
https://docs.microsoft.com/en-us/bingmaps/v8-web-control/map-control-concepts/layers/wms-tile-layer-example
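If you would rather script the request than embed it in a web map, the sketch below shows one possible way to fetch a WMS image in Python with the third-party [OWSLib](https://pypi.org/project/OWSLib/) package. This is an illustration only, not part of the DE Africa documentation: the layer choice, bounding box and output file name are placeholder assumptions that you would replace with your own.
```
# Illustration only: fetching a small map image from the DE Africa WMS with
# OWSLib (pip install owslib). The layer choice, bounding box and output file
# below are placeholder assumptions, not values from the tutorial.
from owslib.wms import WebMapService

wms = WebMapService('https://ows.digitalearth.africa/wms', version='1.3.0')
print(list(wms.contents)[:5])   # a few of the advertised layer names

layer = list(wms.contents)[0]   # replace with the layer you actually want
response = wms.getmap(layers=[layer],
                      srs='EPSG:4326',
                      bbox=(25.0, -34.0, 26.0, -33.0),   # your area of interest
                      size=(512, 512),
                      format='image/png')
with open('wms_preview.png', 'wb') as out:
    out.write(response.read())
```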
## Tutorial: How to connect WCS
This tutorial shows how to create a Web Coverage Service connection using QGIS.
1. Launch QGIS.
2. On the Menu Bar click on **Layer**.
3. A sub-menu tab will show below Layer; click on **Add Layer**, choose **Add WCS Layer**.
<img align="middle" src="_static/other_information/ows_tutorial_5.png" alt="QGIS - Add WCS" width="500">
4. Click on the **New** button.
5. A dialogue will open, as shown below. Provide the following details, which can be found at https://ows.digitalearth.africa/
`Name: DE Africa Services`
`URL: https://ows.digitalearth.africa/wcs?version=2.1.0`
<img align="middle" src="_static/other_information/ows_tutorial_6.png" alt="QGIS - WCS Connection" width="300">
6. After providing the details above, click on **OK**.
7. The previous dialogue will show up. In the dropdown above the New button you will see DE Africa Services; if it is not there, click the dropdown button and select it.
8. The **Connect** button will be activated; click on it to load the layers. Any time this page is open, simply click the **Connect** button to load the data, because the connection has already been established.
9. The layer will be loaded as shown below in the dialogue.
<img align="middle" src="_static/other_information/ows_tutorial_4.png" alt="QGIS - Loaded WCS" width="500">
10. Navigate through layers and choose the layer you will need to display on the Map Page. With WCS you can select Time and Format of Image.
11. After selecting the layer click on the **Add** button at the bottom of the dialogue.
| github_jupyter |
# Useful modules in standard library
---
**Programming Language**
- Core Feature
  + built in with the language
  + e.g. input(), all(), for, if
- Standard Library
  + comes preinstalled with the language installer
  + e.g. datetime, csv, Fraction
- Third-party Library
  + created by the community to solve specific problems
  + e.g. numpy, pandas, requests
## import statement
### Absolute import
```
%ls
import hello
import hello2
%cat hello.py
hello.hello()
%ls hello_package/
%cat hello_package/__init__.py
%cat hello_package/diff.py
import hello_package
hello_package.diff.diff
hello_package.diff.diff()
hello_package.diff
import hello_package.diff
diff.diff()
hello_package.diff.diff()
import hello_package.diff as hello_diff
hello_diff.diff()
from hello_package.diff import diff
diff()
patch()
from hello_package.diff import patch
patch()
```
### Relative import
```
import sys
sys.path
from .hello import hello
__name__
sys.__name__
```
## Date and Time
```
import datetime
datetime
datetime.datetime
datetime.datetime.now()
datetime.datetime.today()
datetime.date.today()
now = datetime.datetime.now()
now
now.year
now.microsecond
now.second
help(now)
yesterday = datetime.datetime(2016, 8, 1, 8, 32, 29)
yesterday
now == yesterday
now > yesterday
now < yesterday
now - yesterday
```
*timedelta is difference between two datetime*
```
delta = datetime.timedelta(days=3)
delta
yesterday + delta
now - delta
yesterday / now
yesterday // now
yesterday % now
yesterday * delta
help(datetime.timedelta)
help(datetime.datetime)
datetime.tzinfo('+530')
datetime.datetime(2016, 10, 20, tzinfo=datetime.tzinfo('+530'))
now.tzinfo
datetime.datetime.now()
datetime.datetime.utcnow()
```
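The `tzinfo` calls above use an abstract base class that is not meant to be instantiated with an offset string; a small sketch (my addition, not part of the original notes) of the usual way to build a timezone-aware datetime with the concrete `datetime.timezone` class:
```
# The tzinfo base class is abstract; datetime.timezone is the concrete
# fixed-offset implementation that ships with the standard library.
ist = datetime.timezone(datetime.timedelta(hours=5, minutes=30))
aware = datetime.datetime(2016, 10, 20, tzinfo=ist)
aware.tzinfo
datetime.datetime.now(ist)   # current time in that timezone
```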
## Files and Directories
```
f = open('hello.py')
open('non existing file')
f.read()
f.read()
f.seek(0)
f.read()
f.seek(0)
f.readlines()
f.seek(0)
f.readline()
f.readline()
f.readline()
f.close()
with open('hello.py') as _file:
for line in _file.readlines():
print(line)
```
**os**
```
import os
os.path.abspath('hello.py')
os.path.dirname(os.path.abspath('hello.py'))
os.path.join(os.path.dirname(os.path.abspath('hello.py')),
'another.py')
import glob
glob.glob('*.py')
glob.glob('*')
```
## CSV files
```
import csv
with open('../../data/countries.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
print(line)
with open('../../data/countries.csv') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['name', 'code'])
for line in reader:
print(line)
data = [
{'continent': 'asia', 'name': 'nepal'},
{'continent': 'asia', 'name': 'india'},
{'continent': 'asia', 'name': 'japan'},
{'continent': 'africa', 'name': 'chad'},
{'continent': 'africa', 'name': 'nigeria'},
{'continent': 'europe', 'name': 'greece'},
{'continent': 'europe', 'name': 'norway'},
{'continent': 'north america', 'name': 'canada'},
{'continent': 'north america', 'name': 'mexico'},
{'continent': 'south america', 'name': 'brazil'},
{'continent': 'south america', 'name': 'chile'}
]
# r == read
# w == write [ erase the file first ]
# a == append
with open('countries.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=['name', 'continent'])
writer.writeheader()
writer.writerows(data)
# r == read
# w == write [ erase the file first ]
# a == append
with open('countries.csv', 'a') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=['name', 'continent'])
writer.writerow({'name': 'pakistan', 'continent': 'asia'})
```
## Fractions
```
import fractions
fractions.Fraction(3, 5)
from fractions import Fraction
Fraction(2, 3)
Fraction(1, 3) + Fraction(1, 3)
(1/3) + (1/3)
10/21
```
## Named Tuples
```
from collections import namedtuple
Color = namedtuple('Color', ['red', 'green', 'blue'])
button_color = Color(231, 211, 201)
button_color.red
button_color[0]
'This picture has Red:{0.red} Green:{0.green} and Blue:{0.blue}'.format(button_color)
```
## Builtin Methods
- all()
- any()
- chr()
- dict()
- dir()
- help()
- id()
- input()
- list()
- len()
- map()
- open()
- print()
- range()
- reversed()
- set()
- sorted()
- tuple()
- zip()
```
all([1, 0, 4])
all([1, 3, 4])
any([1, 0])
any([0, 0])
chr(64)
chr(121)
ord('6')
ord('*')
dict(name='kathmandu', country='nepal')
dir('')
help(''.title)
id('')
id(1)
input("Enter your number")
list((1, 3, 5))
list('hello')
len('hello')
len([1, 4, 5])
# open()
# see: above
print("test")
range(0, 9)
range(0, 99, 3)
list(range(0, 9))
reversed(list(range(0, 9)))
list(reversed(list(range(0, 9))))
''.join(reversed('hello'))
set([1, 5, 6, 7, 8, 7, 1])
tuple([1, 5, 2, 7, 3, 9])
sorted([1, 5, 2, 7, 3, 9])
sorted([1, 5, 2, 7, 3, 9], reverse=True)
data = [{'continent': 'asia', 'name': 'nepal', 'id':0},
{'continent': 'asia', 'name': 'india', 'id':5},
{'continent': 'asia', 'name': 'japan', 'id':8},
{'continent': 'africa', 'name': 'chad', 'id':2},
{'continent': 'africa', 'name': 'nigeria', 'id':7},
{'continent': 'europe', 'name': 'greece', 'id':1},
{'continent': 'europe', 'name': 'norway', 'id':6},
{'continent': 'north america', 'name': 'canada', 'id':3},
{'continent': 'north america', 'name': 'mexico', 'id':5},
{'continent': 'south america', 'name': 'brazil', 'id':4},
{'continent': 'south america', 'name': 'chile', 'id':7}]
def sort_by_name(first):
    return first['name']
sorted(data, key=sort_by_name)
list(zip([1, 2, 3], [2, 3, 4]))
```
**Lambda operations**
```
map(lambda x: x * 2, [1, 2, 3, 4])
list(map(lambda x: x * 2, [1, 2, 3, 4]))
lambda x: x + 4
def power2(x):
return x * 2
list(map(power2, [1, 2, 3, 4]))
```
*reduce is a builtin in Python 2 only*
```
reduce(lambda x, y: x + y, [1, 4, 5, 6, 9])
```
*for python 3*
```
from functools import reduce
reduce(lambda x, y: x + y, [1, 4, 5, 7, 8])
```
*filter*
```
list(filter(lambda x: x < 3, [1, 3, 5, 2, 8]))
```
| github_jupyter |
<a id='sect0'></a>
## <font color='darkblue'>Preface</font>
Although I am no longer young, measured against the [history of FP (Functional programming)](https://en.wikipedia.org/wiki/Functional_programming#History) I still count as young:
> The lambda calculus, developed in the 1930s by Alonzo Church, is a formal system of computation built from function application.
<br/><br/>
<b><font size='3ptx'>With such a long history, it is impossible to cover everything in a few words</font></b>, so here we only scratch the surface and hope to at least cover the following:
* <font size='3ptx'><b><a href='#sect1'>Basic FP terminology (Function/Side effect/Closure ...)</a></b></font>
* <font size='3ptx'><b><a href='#sect2'>Lambda usage & examples</a></b></font>
* <font size='3ptx'><b><a href='#sect3'>Other functions often used with lambda: map/filter/functools.reduce</a></b></font>
* <font size='3ptx'><b><a href='#sect4'>Acceptable situations for using lambda</a></b></font>
* <font size='3ptx'><b><a href='#sect5'>Usages that will get flagged in review</a></b></font>
* <font size='3ptx'><b><a href='#sect6'>A brief introduction to FPU</a></b></font>
```
#!pip install fpu
from fpu.flist import *
from functools import partial
from typing import Sequence
from collections.abc import Iterable
```
<a id='sect1'></a>
## <font color='darkblue'>Basic FP terminology</font>
* <font size='3ptx'><b><a href='#sect1_1'>FP Terminology - Imperative vs Declarative</a></b></font>
* <font size='3ptx'><b><a href='#sect1_2'>FP Terminology - Closure</a></b></font>
* <font size='3ptx'><b><a href='#sect1_3'>FP Terminology - Currying</a></b></font>
<br/>
Functional programming has a [long history](https://en.wikipedia.org/wiki/Functional_programming#History). In a nutshell, **it's a style of programming where you focus on transforming data through the use of small expressions that ideally don't contain side effects.** In other words, when you call <font color='blue'>my_fun(1, 2)</font>, it will always return the same result. This is achieved by the **immutable data** typical of a functional language.

([image source](https://www.fpcomplete.com/blog/2017/04/pure-functional-programming/))
<br/>
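A tiny sketch (my addition, not in the original notes) of what "no side effects" means in practice:
```
# Pure: the result depends only on the inputs, and nothing outside is changed.
def pure_add(a, b):
    return a + b

# Impure: the result depends on (and mutates) state outside the function,
# so calling it twice with the same argument gives different results.
total = 0
def impure_add(a):
    global total
    total += a
    return total

print(pure_add(1, 2), pure_add(1, 2))   # 3 3
print(impure_add(1), impure_add(1))     # 1 2
```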
<a id='sect1_1'></a>
### <font color='darkgreen'>FP Terminology - Imperative vs Declarative</font>
You can think of `Imperative` vs `Declarative` as one way of classifying programming languages (<font color='brown'>like colour, size, shape, etc.</font>). The difference between the two camps becomes clear once you compare how code is written in each:

<br/>
#### <font size='3ptx'>Imperative</font>
Below is the [Wikipedia description of imperative programming](https://en.wikipedia.org/wiki/Imperative_programming):
> **Imperative programming** is like building assembly lines, which take some initial global state as raw material, apply various specific transformations, mutations to it as this material is pushed through the line, and at the end comes off the end product, the final global state, that represents the result of the computation. Each step needs to change, rotate, massage the workpiece precisely one specific way, so that it is prepared for subsequent steps downstream. Every step downstream depend on every previous step, and their order is therefore fixed and rigid. Because of these dependencies, an individual computational step has not much use and meaning in itself, but only in the context of all the others, and to understand it, one must understand how the whole line works.
<br/>
Most languages today belong to the imperative camp. The description above is a mouthful, but comparing the two programming styles makes the difference easy to see. Below is the imperative style:
```
salaries = [
(True, 9000), # (Is female, salary)
(False, 12000),
(False, 6000),
(True, 14000),
]
def imperative_way(salaries):
'''Gets the sum of salaries of female and male.
Args:
salaries: List of salary. Each element is of tuple(is_female: bool, salary: int)
Returns:
Tuple(Sum of female salaries, Sum of male salaries)
'''
female_sum = male_sum = 0
for is_female, salary in salaries:
if is_female:
female_sum += salary
else:
male_sum += salary
return (female_sum, male_sum)
imperative_way(salaries)
```
#### <font size='3ptx'>Declarative</font>
Below is the [Wikipedia description of declarative programming](https://en.wikipedia.org/wiki/Declarative_programming):
> A style of building the structure and elements of computer programs—that expresses the logic of a computation without describing its control flow.
```
def add(a, b):
return a + b
def salary_sum(is_female: bool):
'''Return calculator to sum up the salary based on female/male
Args:
is_female: True to return calculator to sum up salaries of female
False to return calculator to sum up salaries of male.
Returns:
Calculator to sum up salaries.
'''
def _salary_sum(salaries):
flist = fl(salaries)
return flist.filter(lambda t: t[0] == is_female) \
.map(lambda t: t[1]) \
.foldLeft(0, add)
return _salary_sum
def declarative_way(salaries):
return (
salary_sum(is_female=True)(salaries), # Salary sum of female
salary_sum(is_female=False)(salaries), # Salary sum of male
)
declarative_way(salaries)
```
<a id='sect1_2'></a>
### <font color='darkgreen'>FP Terminology - Closure</font> ([back](#sect1))
A [**Closure**](https://en.wikipedia.org/wiki/Closure_(computer_programming)) is a function which **simply creates a scope that allows the function to access and manipulate the variables in enclosing scopes**. Normally, you will follow below steps to create a Closure in Python:
* We have to create a nested function (a function inside another function).
* This nested function has to refer to a variable defined inside the enclosing function.
* The enclosing function has to return the nested function
<br/>
Simply put, your function object is bound to an enclosing namespace (<font color='brown'>[this article](https://dboyliao.medium.com/%E8%81%8A%E8%81%8A-python-closure-ebd63ff0146f) explains it quite clearly and is worth a read</font>). Let's look at an example:
```
def contain_N(n):
def _inner(sequence: Sequence):
return n in sequence
return _inner
contain_5 = contain_N(5)
contain_10 = contain_N(10)
my_datas = [1, 2, 3, 4, 5]
print(f'my_data={my_datas} contains 5? {contain_5(my_datas)}')
print(f'my_data={my_datas} contains 10? {contain_10(my_datas)}')
```
The function `contain_N` above returns a closure which binds the variable `n` (<font color='blue'>contain_5</font> <font color='brown'>binds `n` to 5;</font> <font color='blue'>contain_10</font> <font color='brown'>binds `n` to 10</font>).
<a id='sect1_3'></a>
### <font color='darkgreen'>FP Terminology - Currying</font> ([back](#sect1))
Below is the Wikipedia description of [**currying**](https://en.wikipedia.org/wiki/Curry_(programming_language)):
> <b>Currying is like a kind of incremental binding of function arguments</b>. It is the technique of breaking down the evaluation of a function that takes multiple arguments into evaluating a sequence of single-argument functions.
<br/>
Unfortunately, Python functions do not support this feature out of the box. Fortunately, the [**functools**](https://docs.python.org/3/library/functools.html) module provides [partial](https://docs.python.org/3/library/functools.html#functools.partial) to emulate the benefits of currying. Let's look at an example:
```
def sum_salary_by_sex(*args):
def _sum_salary_by_sex(is_female: bool, salaries: Sequence) -> int:
return sum(map(lambda t: t[1], filter(lambda t: t[0]==is_female, salaries)))
if len(args) == 1:
return partial(_sum_salary_by_sex, args[0])
return _sum_salary_by_sex(*args)
# Get female salaries
sum_salary_by_sex(True, salaries)
# Get female salaries in a currying way
sum_salary_by_sex(True)(salaries)
# Get male salaries
sum_salary_by_sex(False, salaries)
# Get male salaries in a currying way
sum_salary_by_sex(False)(salaries)
sum_salary_by_female = sum_salary_by_sex(True)
sum_salary_by_male = sum_salary_by_sex(False)
sum_salary_by_female(salaries)
sum_salary_by_male(salaries)
```
Thanks to currying we obtain the new functions <font color='blue'>sum_salary_by_female</font> and <font color='blue'>sum_salary_by_male</font>. Quite convenient, isn't it?
<a id='sect2'></a>
## <font color='darkblue'>Lambda usage & examples</font> ([back](#sect0))
[**Lambda**](https://docs.python.org/3/reference/expressions.html#lambda) is a Python keyword used to define anonymous functions. Keep the following points in mind when using lambda anonymous functions:
* It can only contain expressions and can't include statements ([No Statements](#sect2_1)) in its body.
* [It is written as a single line of execution](#sect2_2).
* [It does not support type annotations.](#sect2_3)
* [It can be immediately invoked](#sect2_4) ([IIFE](https://en.wikipedia.org/wiki/Immediately_invoked_function_expression)).
<a id='sect2_1'></a>
### <font color='darkgreen'>No Statements</font>
<b><font size='3ptx' color='darkred'>A lambda function can’t contain any statements</font></b>. In a lambda function, statements like `return`, `pass`, `assert`, or `raise` will raise a [**SyntaxError**](https://realpython.com/invalid-syntax-python/) exception. Here’s an example of adding assert to the body of a lambda:
```python
>>> (lambda x: assert x == 2)(2)
File "<input>", line 1
(lambda x: assert x == 2)(2)
^
SyntaxError: invalid syntax
```
<br/>
<a id='sect2_2'></a>
### <font color='darkgreen'>Single Expression</font>
<font size='3ptx'><b>In contrast to a normal function, a Python lambda function is a single expression</b></font>. Although, in the body of a lambda, you can spread the expression over several lines using parentheses or a multiline string, it remains a single expression:
```python
>>> (lambda x:
... (x % 2 and 'odd' or 'even'))(3)
'odd'
```
<br/>
The example above returns the string 'odd' when the lambda argument is odd, and 'even' when the argument is even. It spreads across two lines because it is contained in a set of parentheses, but it remains a single expression.
<a id='sect2_3'></a>
### <font color='darkgreen'>Type Annotations</font>
If you’ve started adopting type hinting, which is now available in Python, then you have another good reason to prefer normal functions over Python lambda functions. Check out [**Python Type Checking** (Guide)]((https://realpython.com/python-type-checking/#hello-types)) to get learn more about Python type hints and type checking. In a lambda function, there is no equivalent for the following:
```python
def full_name(first: str, last: str) -> str:
return f'{first.title()} {last.title()}'
```
<br/>
Any type error with <font color='blue'>full_name()</font> can be caught by tools like [**mypy**](http://mypy-lang.org/) or [**pyre**](https://pyre-check.org/), whereas a [**SyntaxError**](https://realpython.com/invalid-syntax-python/) with the equivalent lambda function is raised at runtime:
```python
>>> lambda first: str, last: str: first.title() + " " + last.title() -> str
File "<stdin>", line 1
lambda first: str, last: str: first.title() + " " + last.title() -> str
SyntaxError: invalid syntax
```
<br/>
Like trying to include a statement in a lambda, adding type annotation immediately results in a [**SyntaxError**](https://realpython.com/invalid-syntax-python/) at runtime.
<a id='sect2_4'></a>
### <font color='darkgreen'>IIFE</font>
You’ve already seen several examples of [immediately invoked function execution](https://developer.mozilla.org/en-US/docs/Glossary/IIFE):
```python
>>> (lambda x: x * x)(3)
9
```
<br/>
<b>It’s a direct consequence of a lambda function being callable as it is defined</b>. For example, this allows you to pass the definition of a Python lambda expression to a higher-order function like [map()](https://docs.python.org/3/library/functions.html#map), [filter()](https://docs.python.org/3/library/functions.html#filter), or [**functools**.reduce()](https://docs.python.org/3/library/functools.html#functools.reduce), or to a `key function`.
<a id='sect3'></a>
## <font color='darkblue'>Functions often used with lambda: map/filter/functools.reduce and key functions</font> ([back](#sect0))
* <font size='3ptx'><b><a href='#sect3_1'>Built-in map/filter & functools.reduce</a></b></font>
* <font size='3ptx'><b><a href='#sect3_2'>A brief look at key functions</a></b></font>
<a id='sect3_1'></a>
### <font color='darkgreen'>Built-in map/filter & functools.reduce</font>
[map](https://docs.python.org/3/library/functions.html#map) and [filter](https://docs.python.org/3/library/functions.html#filter) are built into Python, while [reduce](https://docs.python.org/3/library/functools.html#functools.reduce) has to be imported from the [**functools**](https://docs.python.org/3/library/functools.html) module. Below is an illustration of how these three functions work:
[image source](https://www.reddit.com/r/ProgrammerHumor/comments/55ompo/map_filter_reduce_explained_with_emojis/)

<br/>
To me, [reduce](https://docs.python.org/3/library/functools.html#functools.reduce) feels more like:

<br/>
Let's look at a few examples:
```
class Beef:
def __init__(self):
self.is_veg = False
def cook(self): return 'Hamburger'
class Potato:
def __init__(self):
self.is_veg = True
def cook(self): return 'French Fries'
class Chicken:
def __init__(self):
self.is_veg = False
def cook(self): return 'Fried chicken'
class Corn:
def __init__(self):
self.is_veg = True
def cook(self): return 'Popcorn'
food_ingredients = [Beef(), Potato(), Chicken(), Corn()]
```
#### <font size='3ptx'>map example</font>
[map](https://docs.python.org/3/library/functions.html#map) takes a function and an iterable object (further reading: [`The Iterator Protocol`](https://www.pythonmorsels.com/iterator-protocol/)). It then feeds every element of the iterable into the function you provide, collects the return values, and gives you back another iterable object. In the example below:
* **function**: `lambda food_ingredient: food_ingredient.cook()`
* **iterable object**: `food_ingredients`
```
# map(function, iterable, ...):
# Return an iterator that applies function to every item of iterable, yielding the results.
map_iter = map(lambda food_ingredient: food_ingredient.cook(), food_ingredients)
isinstance(map_iter, Iterable) # map_iter is an iterable object.
list(map_iter)
```
#### <font size='3ptx'>filter example</font>
[filter](https://docs.python.org/3/library/functions.html#filter) uses the function you provide to select elements from the given iterable object (an element is kept when the function returns True for it).
```
# filter(function, iterable):
# Construct an iterator from those elements of iterable for which function returns true.
veg_iter = filter(lambda food_ingredient: food_ingredient.is_veg, food_ingredients)
isinstance(veg_iter, Iterable) # veg_iter is an iterable object.
# Only Potato and Corn are selected, because their `is_veg` == True
list(veg_iter)
```
#### <font size='3ptx'>reduce example</font>
The behaviour of reduce is easier to show than to explain, so let's go straight to an example:
```
from functools import reduce
# If initializer is not given, the first item of iterable object is returned.
f = lambda a, b: a+b
reduce(
f,
[1, 2, 3, 4, 5]
)
```
The execution above can be pictured as:

<br/>
In fact, you can also provide an initial value, for example:
```
reduce(
lambda a, b: a+b,
[1, 2, 3, 4, 5],
10,
)
```
For more on this function, see [**Python's reduce(): From Functional to Pythonic Style**](https://realpython.com/python-reduce-function/).
<a id='sect3_2'></a>
### <font color='darkgreen'>A brief look at key functions</font>
<b><font size='3ptx'>Key functions in Python are higher-order functions that take a parameter `key` as a named argument.</font></b>
Many Python functions expose a `key` parameter, and this is one of the typical places where lambda is used, for example:
* [sort()](https://docs.python.org/3/library/stdtypes.html#list.sort): list method
* [sorted()](https://docs.python.org/3/library/functions.html#sorted), [min()](https://docs.python.org/3/library/functions.html#min), [max()](https://docs.python.org/3/library/functions.html#max): built-in functions
* [nlargest()](https://docs.python.org/3/library/heapq.html#heapq.nlargest) and [nsmallest()](https://docs.python.org/3/library/heapq.html#heapq.nsmallest): in the Heap queue algorithm module [**heapq**](https://docs.python.org/3/library/heapq.html)
<br/>
Let's look at a few examples to understand the usage.
#### sorted
Suppose you have the following list:
```
ids = ['id1', 'id2', 'id100', 'id30', 'id3', 'id22']
```
You want to sort the list (ascending) by the `<num>` part of `id<num>`; this is where [sorted](https://docs.python.org/3/library/functions.html#sorted) comes in handy:
* **sorted(iterable, /, *, <font color='red'>key=None</font>, reverse=False)**: Return a new sorted list from the items in iterable.
```
sorted(
ids,
    key=lambda id_str: int(id_str[2:]),  # value used for comparison
)
```
Once you understand one key function, the rest follow the same pattern. For example, getting the id with the largest `<num>` would be:
```
max(
ids,
key=lambda id_str: int(id_str[2:]))
```
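Following the same pattern (my addition, not in the original notes), [nlargest()](https://docs.python.org/3/library/heapq.html#heapq.nlargest) takes the same kind of `key`:
```
import heapq

# The two ids with the largest <num>, reusing the same key function
heapq.nlargest(2, ids, key=lambda id_str: int(id_str[2:]))
```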
<a id='sect4'></a>
## <font color='darkblue'>Acceptable situations for using lambda</font> ([back](#sect0))
Below is the advice on lambda usage from the readability documentation:
* [**2.10 Lambda Functions**](https://engdoc.corp.google.com/eng/doc/devguide/py/style/index.md?cl=head#lambdas)
> Okay to use them for one-liners. If the code inside the lambda function is longer than 60-80 chars, it's probably better to define it as a regular [nested function](https://engdoc.corp.google.com/eng/doc/devguide/py/style/index.md?cl=head#lexical-scoping). <br/><br/>
> For common operations like multiplication, use the functions from the operator module instead of lambda functions. For example, prefer [**operator**.mul](https://docs.python.org/3/library/operator.html#operator.mul) to `lambda x, y: x * y`.
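For example (my own illustration of that advice, not from the style guide itself), the reduce call shown earlier can use [operator.add](https://docs.python.org/3/library/operator.html#operator.add) instead of a lambda:
```
import operator
from functools import reduce

reduce(operator.add, [1, 4, 5, 7, 8])   # same result as reduce(lambda x, y: x + y, ...)
```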
### <font color='darkgreen'>Alternatives to Lambdas</font>
Personally, I don't specifically flag lambda usage in readability reviews, but I have received review comments suggesting other ways to replace lambdas. Let's look at a few examples.
#### <font size='3ptx'>Map</font>
**The built-in function [map()](https://docs.python.org/3/library/functions.html#map) takes a function as a first argument and applies it to each of the elements of its second argument, an iterable**. Examples of iterables are strings, lists, and tuples. For more information on iterables and iterators, check out [**Iterables and Iterators**](https://realpython.com/lessons/looping-over-iterables/).
[map()](https://docs.python.org/3/library/functions.html#map) returns an iterator corresponding to the transformed collection. As an example, if you wanted to transform a list of strings to a new list with each string capitalized, you could use [map()](https://docs.python.org/3/library/functions.html#map), as follows:
```
# Map example
list(map(lambda x: x.capitalize(), ['cat', 'dog', 'cow']))
# Proposed way in using list comprehension
[w.capitalize() for w in ['cat', 'dog', 'cow']]
```
#### <font size='3ptx'>Filter</font>
The built-in function [filter()](https://docs.python.org/3/library/functions.html#filter), another classic functional construct, can be converted into a list comprehension. It takes a [predicate](https://en.wikipedia.org/wiki/Predicate_(mathematical_logic)) as a first argument and an iterable as a second argument. It builds an iterator containing all the elements of the initial collection that satisfies the predicate function. Here’s an example that filters all the even numbers in a given list of integers:
```
# Filter example
even = lambda x: x%2 == 0
list(filter(even, range(11)))
# Proposed way in using list comprehension
[x for x in range(11) if x%2 == 0]
```
#### <font size='3ptx'>Reduce</font>
Since Python 3, [reduce()](https://docs.python.org/3/library/functools.html#functools.reduce) has gone from a built-in function to a [**functools**](https://docs.python.org/3/library/functools.html#functools.reduce) module function. As [map()](https://docs.python.org/3/library/functions.html#map) and [filter()](https://docs.python.org/3/library/functions.html#filter), its first two arguments are respectively a function and an iterable. It may also take an initializer as a third argument that is used as the initial value of the resulting accumulator. For each element of the iterable, [reduce()](https://docs.python.org/3/library/functools.html#functools.reduce) applies the function and accumulates the result that is returned when the iterable is exhausted.
To apply [reduce()](https://docs.python.org/3/library/functools.html#functools.reduce) to a list of pairs and calculate the sum of the first item of each pair, you could write this:
```python
>>> import functools
>>> pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
>>> functools.reduce(lambda acc, pair: acc + pair[0], pairs, 0)
6
```
<br/>
A more idiomatic approach using a [generator expression](https://www.python.org/dev/peps/pep-0289/), as an argument to [sum()](https://docs.python.org/3/library/functions.html#sum) in the example, is the following:
```
pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
sum(x[0] for x in pairs)
generator = (x[0] for x in pairs)
generator
iterator = pairs.__iter__()
iterator
```
For an introduction to generators and iterators, see "[**How to Use Generators and yield in Python**](https://realpython.com/introduction-to-python-generators/)" and "[**The Python `for` loop**](https://realpython.com/python-for-loop/#the-python-for-loop)".
<a id='sect5'></a>
## <font color='darkblue'>Usages that will get flagged in review</font> ([back](#sect0))
* <font size='3ptx'><b><a href='#sect5_1'>g-long-lambda</a></b></font>
* <font size='3ptx'><b><a href='#sect5_2'>unnecessary-lambda</a></b></font>
<br/>
**<font color='darkred'>The next sections illustrate a few examples of lambda usages that should be avoided</font>**. Those examples might be situations where, in the context of Python lambda, the code exhibits the following pattern:
* It doesn’t follow the Python style guide ([PEP 8](https://peps.python.org/pep-0008/))
* It’s cumbersome and difficult to read.
* It’s unnecessarily clever at the cost of difficult readability.
<a id='sect5_1'></a>
### <font color='darkgreen'>g-long-lambda</font>
> ([link](go/gpylint-faq#g-long-lambda)) Used when a tricky functional-programming construct may be too long.
* <b><font color='darkred'>Negative:</font></b>
```python
users = [
{'name': 'John', 'age': 40, 'sex': 1},
{'name': 'Ken', 'age': 26, 'sex': 0},
...
]
sorted_users = sorted(
users,
key=lambda u: (u['age'], u['sex']) if is_employee(u) else (u['age'], u['name']),
)
```
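* <b><font color='green'>One possible rewrite</font></b> (my own sketch, not from the style guide; `is_employee` is the same helper assumed in the negative example):
```python
def user_sort_key(u):
    """Employees sort by (age, sex); everyone else sorts by (age, name)."""
    if is_employee(u):
        return (u['age'], u['sex'])
    return (u['age'], u['name'])

sorted_users = sorted(users, key=user_sort_key)
```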
<a id='sect5_2'></a>
### <font color='darkgreen'>unnecessary-lambda</font>
> ([link](go/gpylint-faq#unnecessary-lambda)) Lambda may not be necessary
* <b><font color='darkred'>Negative:</font></b>
```python
foo = {'x': 1, 'y': 2}
self.mock_fn_that_returns_dict = lambda: foo.copy()
```
* <b><font color='green'>Example:</font></b>
```python
foo = {'x': 1, 'y': 2}
self.mock_fn_that_returns_dict = foo.copy
```
<a id='sect6'></a>
## <font color='darkblue'>A brief introduction to FPU</font> ([back](#sect0))
* <font size='3ptx'><b><a href='#sect6_1'>functional composition</a></b></font>
* <font size='3ptx'><b><a href='#sect6_2'>Built-in filter/map/reduce in collection object</a></b></font>
<br/>
[**fpu**](https://github.com/johnklee/fpu) (<font color='brown'>Functional programming utility</font>) is a Python package I maintain to improve Python's support for FP. Here are a few examples of the benefits it brings.
<a id='sect6_1'></a>
### <font color='darkgreen'>functional composition</font>
Functional composition (further reading: "[**Function composition and lazy execution**](https://ithelp.ithome.com.tw/articles/10235556)") lets you conveniently chain functions together to produce new functions. Suppose you have the following code:
```
data_set = [{'values':[1, 2, 3]}, {'values':[4, 5]}]
# Imperative
def min_max_imp(data_set):
"""Picks up the maximum of each element and calculate the minimum of them."""
max_list = []
for d in data_set:
max_list.append(max(d['values']))
return min(max_list)
# Max of [1, 2, 3] -> [3], max of [4, 5] -> [5] => Got [3, 5]
# Min of [3, 5] => 3
min_max_imp(data_set)
```
This is really just the composition of the two functions min and max. With FPU you can rewrite it as:
```
# FP
from fpu.fp import *
from functools import reduce, partial
# compose2(f, g) = f(g())
min_max = compose2(
partial(reduce, min), # [3, 5] -> [3]
partial(map, lambda d: max(d['values']))) # [{'values':[1, 2, 3]}, {'values':[4, 5]}] -> [3, 5]
min_max(data_set)
```
<a id='sect6_2'></a>
### <font color='darkgreen'>Built-in filter/map/reduce in collection object</font>
Collection objects in FPU come with built-in filter/map/reduce methods. Consider the following problem:
```
# Find the characters that appear in every element of the list.
arr = ['abcdde', 'baccd', 'eeabg']
def gemstones_imp(arr):
# 1) Collect unique character of each element
set_list = []
for s in arr:
set_list.append(set(list(s)))
# 2) Keep searching overlapping characters among all set
uset = set_list[0]
for aset in set_list[1:]:
uset = uset & aset
return ''.join(uset)
gemstones_imp(arr)
```
Rewritten with FPU this becomes:
```
from fpu.flist import *
def gemstones_dec(arr):
rlist = fl(arr)
return ''.join(
rlist.map(
            # Convert each element into a set of characters
lambda e: set(list(e))
).reduce(
            # Successively find the characters shared by every set
lambda a, b: a & b
)
)
gemstones_dec(arr)
```
## <font color='darkblue'>Supplement</font>
* [Medium - About Python Closures (聊聊 Python Closure)](https://dboyliao.medium.com/%E8%81%8A%E8%81%8A-python-closure-ebd63ff0146f)
* [FPU - Functional programming utility (slides)](https://docs.google.com/presentation/d/1e8JkC1253jmfWIwppDbWFpy51m4P-uM0LZxqpPQCUjs/edit?usp=sharing&resourcekey=0-krY5MI7h9oGfveN4D8AN_w)
* [Introduction of FP in Python (notebook)](https://nbviewer.org/github/johnklee/oo_dp_lesson/blob/master/lessons/Test_and_function_programming_in_Python/py_fp.ipynb)
| github_jupyter |
# Inheritance with the Gaussian Class
To give another example of inheritance, take a look at the code in this Jupyter notebook. The Gaussian distribution code is refactored into a generic Distribution class and a Gaussian distribution class. Read through the code in this Jupyter notebook to see how the code works.
The Distribution class takes care of the initialization and the read_data_file method. Then the rest of the Gaussian code is in the Gaussian class. You'll later use this Distribution class in an exercise at the end of the lesson.
Run the code in each cell of this Jupyter notebook. This is a code demonstration, so you do not need to write any code.
```
class Distribution:
def __init__(self, mu=0, sigma=1):
""" Generic distribution class for calculating and
visualizing a probability distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
self.mean = mu
self.stdev = sigma
self.data = []
def read_data_file(self, file_name):
"""Function to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
Args:
file_name (string): name of a file to read from
Returns:
None
"""
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
import math
import matplotlib.pyplot as plt
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# initialize two gaussian distributions
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 2)
# initialize a third gaussian distribution reading in a data file
gaussian_three = Gaussian()
gaussian_three.read_data_file('numbers.txt')
gaussian_three.calculate_mean()
gaussian_three.calculate_stdev()
# print out the mean and standard deviations
print(gaussian_one.mean)
print(gaussian_two.mean)
print(gaussian_one.stdev)
print(gaussian_two.stdev)
print(gaussian_three.mean)
print(gaussian_three.stdev)
# plot histogram of gaussian three
gaussian_three.plot_histogram_pdf()
# add gaussian_one and gaussian_two together
gaussian_one + gaussian_two
```
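As a quick sketch of how the same base class could be reused by another subclass (my own illustration, not part of the original notebook), a new distribution only needs to call the `Distribution` initializer with its own mean and standard deviation:
```
class Uniform(Distribution):
    """Toy continuous uniform distribution built on the Distribution base class."""

    def __init__(self, low=0, high=1):
        # mean and standard deviation of a uniform distribution on [low, high]
        Distribution.__init__(self, (low + high) / 2,
                              math.sqrt((high - low) ** 2 / 12))
        self.low = low
        self.high = high

uniform_example = Uniform(0, 10)
print(uniform_example.mean, uniform_example.stdev)   # 5.0 and roughly 2.89
```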
| github_jupyter |
# SWI - single layer
Test case - strange behaviour of the output control (OC) package
When requesting both budget and head data via the OC package, the solution
differs from when only the head is requested.
This is set via the 'words' parameter in the OC package.
```
%matplotlib inline
import os
import sys
import numpy as np
import flopy.modflow as mf
import flopy.utils as fu
import matplotlib.pyplot as plt
os.chdir('C:\\Users\\Bas\\Google Drive\\USGS\\FloPy\\slope1D')
sys.path.append('C:\\Users\\Bas\\Google Drive\\USGS\\FloPy\\basScript') # location of gridObj
modelname = 'run1swi2'
exe_name = 'mf2005'
workspace = 'data'
ml = mf.Modflow(modelname, exe_name=exe_name, model_ws=workspace)
nstp = 10000 #[]
perlen = 10000 #[d]
ssz = 0.2 #[]
Q = 0.005 #[m3/d]
nlay = 1
nrow = 1
ncol = 4
delr = 1.
delc = 1.
dell = 1.
top = np.array([[-1.,-1., -0.7, -0.4]], dtype = np.float32)
bot = np.array(top-dell, dtype = np.float32).reshape((nlay,nrow,ncol))
initWL = 0. # inital water level
lrcQ1 = np.recarray(1, dtype = mf.ModflowWel.get_default_dtype())
lrcQ1[0] = (0, 0, ncol-1, Q) #LRCQ, Q[m**3/d]
lrchc = np.recarray(2, dtype = mf.ModflowGhb.get_default_dtype())
lrchc[0]=(0, 0, 0, -top[0,0]*0.025, 0.8 / 2.0 * delc)
lrchc[1]=(0, 0, 1, -top[0,0]*0.025, 0.8 / 2.0 * delc)
lrchd = np.recarray(2, dtype = mf.ModflowChd.get_default_dtype())
lrchd[0]=(0, 0, 0, -top[0,0]*0.025, -top[0,0]*0.025)
lrchd[1]=(0, 0, 1, -top[0,0]*0.025, -top[0,0]*0.025)
zini = -0.9*np.ones((nrow,ncol))
isource = np.array([[-2,-2, 0, 0]])
ml = mf.Modflow(modelname, version='mf2005', exe_name=exe_name)
discret = mf.ModflowDis(ml, nrow=nrow, ncol=ncol, nlay=nlay, delr=delr, delc=delc,
laycbd=[0], top=top, botm=bot,
nper=1, perlen=perlen, nstp=nstp)
bas = mf.ModflowBas(ml, ibound=1, strt=(initWL-zini)*0.025)
bcf = mf.ModflowBcf(ml, laycon=[0], tran=[4.0])
wel = mf.ModflowWel(ml, stress_period_data={0:lrcQ1})
#ghb = mf.ModflowGhb(ml, stress_period_data={0:lrchc})
chd = mf.ModflowChd(ml, stress_period_data={0:lrchd})
swi = mf.ModflowSwi2(ml, nsrf=1, istrat=1, toeslope=0.02, tipslope=0.04, nu=[0, 0.025],
zeta=[zini], ssz=ssz, isource=isource, nsolver=1)
oc = mf.ModflowOc(ml,save_head_every=nstp)
pcg = mf.ModflowPcg(ml)
ml.write_input() #--write the model files
m = ml.run_model(silent=True, report=True)
headfile = modelname + '.hds'
hdobj = fu.HeadFile(headfile)
head = hdobj.get_data(idx=0)
zetafile = modelname + '.zta'
zobj = fu.CellBudgetFile(zetafile)
zeta = zobj.get_data(idx=0, text=' ZETASRF 1')[0]
print('isource: ', swi.isource.array)
print('init zeta: ', swi.zeta[0].array)
print('init fresh hd: ', bas.strt.array)
print('final head: ', head[0, 0, :])
print('final zeta: ', zeta[0,0,:])
print('final BGH head: ', - 40. * (head[0, 0, :]))
import gridobj as grd
gr = grd.gridobj(discret)
fig = plt.figure(figsize=(16, 8), dpi=300, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
gr.plotgrLC(ax)
gr.plothdLC(ax,zini[0,:],label='Initial')
gr.plothdLC(ax,zeta[0,0,:], label='SWI2')
gr.plothdLC(ax,head[0, 0, :], label='feshw head')
gr.plothdLC(ax,-40. * (head[0, 0, :]), label='Ghyben-Herzberg')
ax.axis(gr.limLC([-0.2,0.2,-0.2,0.2]))
leg = ax.legend(loc='lower left', numpoints=1)
leg._drawFrame = False
```
```
VOLUMETRIC SWI ZONE BUDGET FOR ENTIRE MODEL
AT END OF TIME STEP10000 IN STRESS PERIOD 1
ZONE 1
-----------------------------------------------------------------------------------
CUMULATIVE VOLUMES L**3 RATES FOR THIS TIME STEP L**3/T
------------------ ------------------------
IN: IN:
--- ---
BOUNDARIES = 50.0015 BOUNDARIES = 5.0000E-03
CONSTANT HEAD = 0.0000 CONSTANT HEAD = 0.0000
ZONE CHANGE = 2.0018E-02 ZONE CHANGE = 0.0000
ZONE CHG TIP/TOE = 5.9999E-03 ZONE CHG TIP/TOE = 0.0000
ZONE MIXING = 0.0000 ZONE MIXING = 0.0000
TOTAL IN = 50.0275 TOTAL IN = 5.0000E-03
OUT: OUT:
---- ----
BOUNDARIES = 0.0000 BOUNDARIES = 0.0000
CONSTANT HEAD = 49.8714 CONSTANT HEAD = 5.0000E-03
ZONE CHANGE = 0.1546 ZONE CHANGE = 0.0000
ZONE CHG TIP/TOE = 5.9999E-03 ZONE CHG TIP/TOE = 0.0000
ZONE MIXING = 0.0000 ZONE MIXING = 0.0000
TOTAL OUT = 50.0320 TOTAL OUT = 5.0000E-03
IN - OUT = -4.5395E-03 IN - OUT = 9.3132E-10
PERCENT DISCREPANCY = -0.01 PERCENT DISCREPANCY = 0.00
VOLUMETRIC SWI ZONE BUDGET FOR ENTIRE MODEL
AT END OF TIME STEP10000 IN STRESS PERIOD 1
ZONE 2
-----------------------------------------------------------------------------------
CUMULATIVE VOLUMES L**3 RATES FOR THIS TIME STEP L**3/T
------------------ ------------------------
IN: IN:
--- ---
BOUNDARIES = 0.0000 BOUNDARIES = 0.0000
CONSTANT HEAD = 0.0000 CONSTANT HEAD = 0.0000
ZONE CHANGE = 0.1546 ZONE CHANGE = 0.0000
ZONE CHG TIP/TOE = 1.8834E-02 ZONE CHG TIP/TOE = 0.0000
ZONE MIXING = 0.0000 ZONE MIXING = 0.0000
TOTAL IN = 0.1734 TOTAL IN = 0.0000
OUT: OUT:
---- ----
BOUNDARIES = 0.0000 BOUNDARIES = 0.0000
CONSTANT HEAD = 0.1300 CONSTANT HEAD = 0.0000
ZONE CHANGE = 3.2853E-02 ZONE CHANGE = 0.0000
ZONE CHG TIP/TOE = 5.9999E-03 ZONE CHG TIP/TOE = 0.0000
ZONE MIXING = 0.0000 ZONE MIXING = 0.0000
TOTAL OUT = 0.1689 TOTAL OUT = 0.0000
IN - OUT = 4.5692E-03 IN - OUT = 0.0000
PERCENT DISCREPANCY = 2.67 PERCENT DISCREPANCY = 0.00
HEAD WILL BE SAVED ON UNIT 51 AT END OF TIME STEP10000, STRESS PERIOD 1
1
VOLUMETRIC BUDGET FOR ENTIRE MODEL AT END OF TIME STEP10000, STRESS PERIOD 1
------------------------------------------------------------------------------
CUMULATIVE VOLUMES L**3 RATES FOR THIS TIME STEP L**3/T
------------------ ------------------------
IN: IN:
--- ---
STORAGE = 0.0000 STORAGE = 0.0000
CONSTANT HEAD = 1.4306E-02 CONSTANT HEAD = 0.0000
WELLS = 50.0015 WELLS = 5.0000E-03
SWIADDTOCH = 149.7491 SWIADDTOCH = 1.5000E-02
TOTAL IN = 199.7649 TOTAL IN = 2.0000E-02
OUT: OUT:
---- ----
STORAGE = 0.0000 STORAGE = 0.0000
CONSTANT HEAD = 199.7248 CONSTANT HEAD = 2.0000E-02
WELLS = 0.0000 WELLS = 0.0000
SWIADDTOCH = 4.9679E-02 SWIADDTOCH = 0.0000
TOTAL OUT = 199.7745 TOTAL OUT = 2.0000E-02
IN - OUT = -9.6436E-03 IN - OUT = 0.0000
PERCENT DISCREPANCY = -0.00 PERCENT DISCREPANCY = 0.00
```
| github_jupyter |
# Bank customers clustering project
This dataset contains data on 5000 customers. The data include customer demographic information (age, income, etc.), the customer's relationship with the bank (mortgage, securities account, etc.), and the customer response to the last personal loan campaign (Personal Loan). Among these 5000 customers, only 480 (= 9.6%) accepted the personal loan that was offered to them in the earlier campaign.
The dataset has a mix of numerical and categorical attributes, but all categorical data are represented with numbers. Moreover, some of the predictor variables are heavily skewed (long-tailed), which makes the data pre-processing an interesting yet not too challenging part of the project.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from matplotlib import cm
df=pd.read_csv("Bank_Personal_Loan_Modelling.csv")
df.head()
```
Column information:
- ID - Customer id
- Age - Customers age
- Experience - Number of years of professional experience
- Income - Annual income of the customer (x1000 USD)
- ZIP Code - Home Address ZIP code
- Family - Family size of the customer
- CCAVG - Avg. spending on credit cards per month (x1000 USD)
- Education - Education Level. 1: Undergrad; 2: Graduate; 3: Advanced/Professional
- Mortgage - Value of house mortgage if any. (x1000 USD)
- Personal Loan - Did this customer accept the personal loan offered in the last campaign?
- Securities Account - Does the customer have a securities account with the bank?(1-yes,0-no)
- CD Account - Does the customer have a certificate of deposit (CD) account with the bank?(1-yes,0-no)
- Online - Does the customer use internet banking facilities? (1-yes,0-no)
- CreditCard - Does the customer use a credit card issued by this Bank? (1-yes,0-no)
```
#renaming columns
df.columns=['id', 'age', 'experience', 'income', 'zip_code', 'family', 'cc_avg',
'education', 'mortgage', 'personal_loan', 'securities_account',
'cd_account', 'online', 'credit_card']
df2=df.copy()
#Converting values
df2["income"]=df["income"]*1000
df2["cc_avg"]=df["cc_avg"]*1000
df2["mortgage"]=df["mortgage"]*1000
df2.head()
```
## Dataset exploring
```
df2.shape
df2.info()
df2["income"].describe()
#visualize outliers with boxplot
plt.boxplot(df['income'])
# Upper outlier threshold Q3 + 1.5(IQR)
max_threshold=98000 + 1.5*(98000 - 39000)
max_threshold
# Removing outliers
df3=df2[df2.income<max_threshold]
# recalculate summary statistics
df3['income'].describe()
df3["cc_avg"].describe()
#visualize outliers with boxplot
plt.boxplot(df['cc_avg'])
# Upper outlier threshold Q3 + 1.5(IQR)
max_threshold=2500+ 1.5*(2500 - 700)
max_threshold
# Removing outliers
df4=df3[df3.cc_avg<max_threshold]
# recalculate summary statistics
df4['cc_avg'].describe()
df4["mortgage"].describe()
df4["mortgage"].value_counts()
df4.shape
```
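As a side note (my addition, not part of the original analysis), the same Q3 + 1.5*IQR rule can be computed directly from the data instead of hard-coding the quartile values used above:
```
# Upper outlier threshold for income, computed from the data itself
q1, q3 = df2['income'].quantile([0.25, 0.75])
q3 + 1.5 * (q3 - q1)
```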
## Data visualization
```
#Ploting scatterplot
title = 'Income by year experience '
plt.figure(figsize=(12,9))
sns.scatterplot(df4.experience,df4.income,hue=df4.experience).set_title(title)
plt.ioff()
#Bar plot of average income by education
df4.groupby('education')["income"].mean().plot.bar(color=[ 'red', 'cyan',"magenta"])
plt.show()
# Count customers presonal loan based on size of familiy
count_delayed=df4.groupby('family')['personal_loan'].apply(lambda x: (x==1).sum()).reset_index(name='Number of customer with personal loan')
color = cm.viridis(np.linspace(.4, .8, 30))
count_delayed= count_delayed.sort_values("Number of customer with personal loan" , ascending=[False])
count_delayed.plot.bar(x='family', y='Number of customer with personal loan', color=color , figsize=(12,7))
#Histogram of customers younger then 35 with mortgage
df4[df4.age<35]["mortgage"].plot.hist(histtype="step")
```
Almost 700 customers have a mortgage between 0 and 50,000 USD.
## Preparing features
```
features=df4[["age","experience","income","cc_avg"]]
```
### Scaling features
```
# min-max scaling
from sklearn.preprocessing import MinMaxScaler, RobustScaler
scaler=MinMaxScaler()
data_scaled_array=scaler.fit_transform(features)
scaled=pd.DataFrame(data_scaled_array, columns=features.columns)
scaled.head()
```
## KMeans cluster
```
from sklearn.cluster import KMeans, AffinityPropagation
import warnings
warnings.filterwarnings("ignore")
#finding sum of the squared distance between centroid and each member of the cluster
k_range = range(1,10)
sse =[]
for k in k_range:
km = KMeans(n_clusters=k)
km.fit(scaled)
sse.append(km.inertia_)
sse
#Ploting kmeans elbow method
plt.plot(k_range, sse, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
#Ploting silhouette score
from sklearn.metrics import silhouette_samples, silhouette_score
clusters_range = range(2,15)
random_range = range(0,20)
results =[]
for c in clusters_range:
for r in random_range:
clusterer = KMeans(n_clusters=c, random_state=r)
cluster_labels = clusterer.fit_predict(scaled)
silhouette_avg = silhouette_score(scaled, cluster_labels)
#print("For n_clusters =", c," and seed =", r, "\nThe average silhouette_score is :", silhouette_avg)
results.append([c,r,silhouette_avg])
result = pd.DataFrame(results, columns=["n_clusters","seed","silhouette_score"])
pivot_km = pd.pivot_table(result, index="n_clusters", columns="seed",values="silhouette_score")
plt.figure(figsize=(15,6))
sns.heatmap(pivot_km, annot=True, linewidths=.5, fmt='.3f', cmap=sns.cm.rocket_r)
plt.tight_layout()
```
The heatmap above shows silhouette scores for various combinations of random state and number of clusters. The highest scores are for 2 and 3 clusters and they are relatively insensitive to seed.
Since the differences are small, I will choose 3 clusters to get more insight into the data.
```
km=KMeans(n_clusters=3)
predict=km.fit_predict(features)
predict
km.cluster_centers_
features["cluster"]=predict
features.head()
grouped_km = features.groupby(['cluster']).mean().round()
grouped_km
#Ploting scatterplot
title = 'Income by age '
plt.figure(figsize=(12,9))
sns.scatterplot(features.age,features.income,hue=features.cluster).set_title(title)
plt.ioff()
#Ploting scatterplot
title = 'Average spending on month by experience '
plt.figure(figsize=(12,9))
sns.scatterplot(features.experience,features.cc_avg,hue=features.cluster).set_title(title)
plt.ioff()
```
| github_jupyter |
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
# Model Zoo -- CNN Gender Classifier (VGG16 Architecture, CelebA) with Data Parallelism
There are multiple ways of leveraging multiple GPUs when using PyTorch. One of these approaches is to send a copy of the model to each available GPU and split each minibatch across them using `DataParallel`.
To break it down into conceptual steps, this is what `DataParallel` does
1. each GPU performs a forward pass on a chunk of the minibatch (on a copy of the model) to obtain the predictions;
2. the first/default GPU gathers these predictions from all GPUs to compute the loss of each minibatch-chunk with respect to the true labels (this is done on the first/default GPU, because we typically define the loss, like `torch.nn.CrossEntropyLoss`, outside the model);
3. each GPU then performs backpropagation to compute the gradient of the loss on its sub-batch with respect to the neural network weights;
4. the first GPU sums up the gradients obtained from each GPU (computer engineers usually refer to this step as "reduce");
5. the first GPU updates the weights in the neural network via gradient descent and sends copies to the individual GPUs for the next round.
While the list above may look a bit complicated at first, the `DataParallel` class automatically takes care of it all, and it is very easy to use in practice.
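As a minimal, self-contained sketch (my own toy example, not the notebook's VGG-16 code), the wrapping boils down to this:
```python
import torch
import torch.nn as nn

# Toy model; DataParallel scatters each minibatch across all visible GPUs
# and gathers the outputs back on the default device.
model = nn.Linear(10, 2)
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(device)

x = torch.randn(64, 10).to(device)   # the full batch lives on the default device
logits = model(x)                    # chunking and gathering happen automatically
print(logits.shape)                  # torch.Size([64, 2])
```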
### Data Parallelism vs regular Backpropagation
Note that using `DataParallel` will result in slightly different models compared to regular backpropagation. The reason is that via data parallelism, we combine the gradients from 4 individual forward and backward runs to update the model. In regular backprop, we would update the model after each minibatch. The following figure illustrates regular backpropagation showing 2 iterations:

The next figure shows one model update iteration with `DataParallel` assuming 2 GPUs:

### Implementation Details
To use `DataParallel`, in the "Model" section (i.e., the corresponding code cell) we replace
```python
model.to(device)
```
with
```python
model = VGG16(num_features=num_features, num_classes=num_classes)
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model = nn.DataParallel(model)
```
and let the `DataParallel` class take care of the rest. Note that in order for this to work, the data currently needs to be on the first cuda device, "cuda:0". Otherwise, we will get a `RuntimeError: all tensors must be on devices[0]`. Hence, we define `device` below, which we use to transfer the input data to during training. Hence, make sure you set
```python
device = torch.device("cuda:0")
```
and not
```python
device = torch.device("cuda:1")
```
(or any other CUDA device number), so that in the training loop, we can use
```python
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
```
If you look at the implementation part
```python
#### DATA PARALLEL START ####
model = VGG16(num_features=num_features, num_classes=num_classes)
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model = nn.DataParallel(model)
#### DATA PARALLEL END ####
model.to(device)
#### DATA PARALLEL START ####
cost_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
you notice that the `CrossEntropyLoss` (we could also use the one implemented in nn.functional) is not part of the model. Hence, the loss will be computed on the device where the target labels are, which is the default device (usually the first GPU). This is the reason why the outputs are gathered on the first/default GPU. I sketched a more detailed outline of the whole process below:

### Speed Comparison
- Using the same batch size as in the 1-GPU version of this code means that if we have four GPUs, each 64-example minibatch gets split into four 16-example minibatches that are distributed across the different GPUs. I noticed that the computation time is approximately half for 4 GPUs compared to 1 GPU (using GeForce 1080Ti cards).
- When I multiply the batch size by 4 in the `DataParallel` version, so that each GPU gets a minibatch of size 64, I notice that the model trains approximately 3x faster on 4 GPUs compared to the single GPU version.
### Network Architecture
The network in this notebook is an implementation of the VGG-16 [1] architecture on the CelebA face dataset [2] to train a gender classifier.
References
- [1] Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
- [2] Zhang, K., Tan, L., Li, Z., & Qiao, Y. (2016). Gender and smile classification using deep convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops (pp. 34-38).
The following table (taken from Simonyan & Zisserman referenced above) summarizes the VGG19 architecture:

**Note that the CelebA images are 218 x 178, not 256 x 256. We resize to 128x128**
## Imports
```
import os
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from PIL import Image
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
```
## Dataset
### Downloading the Dataset
Note that the ~200,000 CelebA face image dataset is relatively large (~1.3 Gb). The download link provided below was provided by the author on the official CelebA website at http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html.
1) Download and unzip the file `img_align_celeba.zip`, which contains the images in jpeg format.
2) Download the `list_attr_celeba.txt` file, which contains the class labels
3) Download the `list_eval_partition.txt` file, which contains training/validation/test partitioning info
### Preparing the Dataset
```
df1 = pd.read_csv('list_attr_celeba.txt', sep=r"\s+", skiprows=1, usecols=['Male'])
# Make 0 (female) & 1 (male) labels instead of -1 & 1
df1.loc[df1['Male'] == -1, 'Male'] = 0
df1.head()
df2 = pd.read_csv('list_eval_partition.txt', sep=r"\s+", skiprows=0, header=None)
df2.columns = ['Filename', 'Partition']
df2 = df2.set_index('Filename')
df2.head()
df3 = df1.merge(df2, left_index=True, right_index=True)
df3.head()
df3.to_csv('celeba-gender-partitions.csv')
df4 = pd.read_csv('celeba-gender-partitions.csv', index_col=0)
df4.head()
df4.loc[df4['Partition'] == 0].to_csv('celeba-gender-train.csv')
df4.loc[df4['Partition'] == 1].to_csv('celeba-gender-valid.csv')
df4.loc[df4['Partition'] == 2].to_csv('celeba-gender-test.csv')
img = Image.open('img_align_celeba/000001.jpg')
print(np.asarray(img, dtype=np.uint8).shape)
plt.imshow(img);
```
### Implementing a Custom DataLoader Class
```
class CelebaDataset(Dataset):
"""Custom Dataset for loading CelebA face images"""
def __init__(self, csv_path, img_dir, transform=None):
df = pd.read_csv(csv_path, index_col=0)
self.img_dir = img_dir
self.csv_path = csv_path
self.img_names = df.index.values
self.y = df['Male'].values
self.transform = transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.img_dir,
self.img_names[index]))
if self.transform is not None:
img = self.transform(img)
label = self.y[index]
return img, label
def __len__(self):
return self.y.shape[0]
```
Running the VGG16 on this dataset with a minibatch size of 64 uses approximately 6.6 GB of GPU memory. However, since we now split each batch over four GPUs, along with the model, we can comfortably use 64*4 as the batch size.
```
# Note that transforms.ToTensor()
# already divides pixels by 255. internally
custom_transform = transforms.Compose([transforms.CenterCrop((178, 178)),
transforms.Resize((128, 128)),
#transforms.Grayscale(),
#transforms.Lambda(lambda x: x/255.),
transforms.ToTensor()])
train_dataset = CelebaDataset(csv_path='celeba-gender-train.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
valid_dataset = CelebaDataset(csv_path='celeba-gender-valid.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
test_dataset = CelebaDataset(csv_path='celeba-gender-test.csv',
img_dir='img_align_celeba/',
transform=custom_transform)
BATCH_SIZE=64*torch.cuda.device_count()
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=4)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
```
Note that for DataParallel to work, the data currently needs to be on the first cuda device, "cuda:0". Otherwise, we will get a `RuntimeError: all tensors must be on devices[0]`. Hence, we define `device` below, which we use to transfer the input data to during training.
```
device = torch.device("cuda:0")
torch.manual_seed(0)
num_epochs = 2
for epoch in range(num_epochs):
for batch_idx, (x, y) in enumerate(train_loader):
print('Epoch:', epoch+1, end='')
print(' | Batch index:', batch_idx, end='')
print(' | Batch size:', y.size()[0])
x = x.to(device)
y = y.to(device)
break
```
## Model
```
##########################
### SETTINGS
##########################
# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 3
# Architecture
num_features = 128*128
num_classes = 2
##########################
### MODEL
##########################
class VGG16(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(VGG16, self).__init__()
# calculate same padding:
# (w - k + 2*p)/s + 1 = o
# => p = (s(o-1) - w + k)/2
self.block_1 = nn.Sequential(
nn.Conv2d(in_channels=3,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
# (1(32-1)- 32 + 3)/2 = 1
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_2 = nn.Sequential(
nn.Conv2d(in_channels=64,
out_channels=128,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=128,
out_channels=128,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_3 = nn.Sequential(
nn.Conv2d(in_channels=128,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_4 = nn.Sequential(
nn.Conv2d(in_channels=256,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_5 = nn.Sequential(
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.classifier = nn.Sequential(
nn.Linear(512*4*4, 4096),
nn.ReLU(),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Linear(4096, num_classes)
)
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
#n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
#m.weight.data.normal_(0, np.sqrt(2. / n))
m.weight.detach().normal_(0, 0.05)
if m.bias is not None:
m.bias.detach().zero_()
elif isinstance(m, torch.nn.Linear):
m.weight.detach().normal_(0, 0.05)
                m.bias.detach().zero_()
def forward(self, x):
x = self.block_1(x)
x = self.block_2(x)
x = self.block_3(x)
x = self.block_4(x)
x = self.block_5(x)
logits = self.classifier(x.view(-1, 512*4*4))
probas = F.softmax(logits, dim=1)
return logits, probas
torch.manual_seed(random_seed)
#### DATA PARALLEL START ####
model = VGG16(num_features=num_features, num_classes=num_classes)
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model = nn.DataParallel(model)
#### DATA PARALLEL END ####
model.to(device)
#### DATA PARALLEL START ####
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
## Training
```
def compute_accuracy(model, data_loader):
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(train_loader), cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
compute_accuracy(model, train_loader),
compute_accuracy(model, valid_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
```
## Evaluation
```
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
for batch_idx, (features, targets) in enumerate(test_loader):
features = features
targets = targets
break
plt.imshow(np.transpose(features[0], (1, 2, 0)))
logits, probas = model(features.to(device)[0, None])
print('Probability Female %.2f%%' % (probas[0][0]*100))
%watermark -iv
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
```
## Read data
Your task is to find parameters $\beta$ of a linear model that approximates the following observations. Each observation is described by only one input feature $x_{1}$.
```
# Read data for the file
data = pd.read_csv("https://raw.githubusercontent.com/lutik-inc/notebooks/master/chunk_4/task_02/data.csv")
# Display the first 5 rows of the data
data.head()
# Get a matrix of the input feature of the observations
x1 = data[['x1']].values
# Get a vector of target values you need to approximate
y = data['y'].values
# Plot the observations
plt.figure(figsize=(9, 4.5))
plt.scatter(x1, y, linewidth=3, label="Observations")
plt.xlabel(r'$x_{1}$', size=14)
plt.xticks(size=14)
plt.ylabel(r"y", size=14)
plt.yticks(size=14)
plt.legend(loc='best', fontsize=14)
plt.tight_layout()
plt.show()
```
## Create matrix X
Now you have the vector of targets $y = (y_{1}, y_{2}, ..., y_{n})^{T}$. Create a matrix $X$ that is defined as
$$
X = \left( \begin{array}{ccccc}
1 & x_{11} & x_{12} & \cdots & x_{1d} \\
1 & x_{21} & x_{22} & \cdots & x_{2d} \\
\vdots & \vdots & \vdots & \cdots & \vdots \\
1 & x_{n1} & x_{n2} & \cdots & x_{nd} \\
\end{array} \right)
$$
Remember that your observations have only one input feature $x_{i1}$.
**Hint:** Use `np.ones()` function to generate a vector of ones $(1, 1, ..., 1)^{T}$. To concatenate two matrices $a$ and $b$ use function `np.hstack((a, b))`.
```
X = ... # Put your code here (2 lines)
print("Output:")
X[:2, :]
```
Expected output:
`[[ 1. , -1. ],
[ 1. , -0.959]]`
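If you want to compare against a reference, here is one possible two-line solution (a sketch, not necessarily the intended one), assuming `x1` is the $(n, 1)$ feature matrix loaded above:
```
# One possible solution sketch
ones = np.ones((x1.shape[0], 1))   # column of ones for the intercept term
X = np.hstack((ones, x1))          # shape (n, 2): [1, x1]
```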
## Init $\beta$
```
beta = np.array([0, 0])
print("Output:")
beta
```
## Loss
Calculate the loss function defined as:
$$
L(\beta) = \frac{1}{n} (X\beta - y)^{T}(X\beta - y)
$$
**Hint:** To multiply two matrices $a$ and $b$ use functions `a.dot(b)` or `np.dot(a, b)`.
```
def loss_func(beta):
loss = ... # Put your code here (1 line)
return loss
loss = loss_func(beta)
print("Output:")
loss
```
Expected output:
`2.19695098`
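A possible implementation that transcribes the loss formula above directly (treat it as a sketch to check your own code against):
```
# One possible solution sketch for the loss function
def loss_func(beta):
    n = X.shape[0]
    resid = X.dot(beta) - y          # X*beta - y
    return resid.T.dot(resid) / n    # (1/n) (X*beta - y)^T (X*beta - y)
```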
## Gradient of the loss function
Calculate gradient of the loss function $\nabla L$ defined as:
$$
\nabla L = \frac{\partial L}{\partial \beta} = \frac{2}{n} X^{T} (X\beta - y)
$$
**Hint:** To multiply two matrices $a$ and $b$ use functions `a.dot(b)` or `np.dot(a, b)`.
```
def grad_func(beta):
grad = ... # Put your code here (1 line)
return grad
grad = grad_func(beta)
print("Output:")
grad
```
Expected output:
`[-1.40972 , -1.52095704]`
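Similarly, one possible implementation of the gradient formula above (again, just a sketch):
```
# One possible solution sketch for the gradient
def grad_func(beta):
    n = X.shape[0]
    return 2.0 / n * X.T.dot(X.dot(beta) - y)
```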
## Gradient descent
Now implement gradient descent for the approximation. The update rule for $\beta$ is:
$$
\beta_{(t+1)} = \beta_{(t)} - \alpha \nabla L(\beta_{(t)})
$$
Estimate how many iterations $t$ are needed to satisfy the following stopping criterion:
$$
| L(\beta_{(t)}) - L(\beta_{(t-1)}) | < 10^{-4}
$$
**Hint:** To multiply two matrices $a$ and $b$ use functions `a.dot(b)` or `np.dot(a, b)`.
```
alpha = 0.1 # learning rate
beta = np.array([0, 0]) # init beta, again :)
beta_collector = [beta]
loss_collector = [loss_func(beta)]
for i_iter in range(1000): # for each iteration
# Calculate gradient
grad = ... # Put your code here (1 line)
# Update beta
beta = ... # Put your code here (1 line)
# Save new beta
beta_collector.append(beta)
# Calculate loss
loss = ... # Put your code here (1 line)
# Save loss
loss_collector.append(loss)
# Stop criterion
if np.abs( loss_collector[-1] - loss_collector[-2] ) < 10**-4:
print("Iteration: ", i_iter)
print("Beta: ", beta)
print("Loss: ", loss)
break
# Plot learning curve
plt.figure(figsize=(9, 4.5))
plt.plot(loss_collector, linewidth=3, label="GD", color='C3')
plt.xlabel(r'Iteration number', size=14)
plt.xticks(size=14)
plt.ylabel(r"Loss function value", size=14)
plt.yticks(size=14)
plt.legend(loc='best', fontsize=14, ncol=2)
plt.tight_layout()
plt.show()
```
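If you get stuck on the loop above, one possible completion of the three blank lines is shown below (a sketch; it assumes the surrounding loop is exactly as written above):
```
# Possible fill-ins for the loop body above (these lines go inside the for loop)
grad = grad_func(beta)          # gradient at the current beta
beta = beta - alpha * grad      # gradient-descent update
loss = loss_func(beta)          # loss at the updated beta
```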
| github_jupyter |
###### Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2014 L.A. Barba, G.F. Forsyth, C.D. Cooper.
# Spreading out
Welcome back! This is the third lesson of the course [Module 4](https://github.com/numerical-mooc/numerical-mooc/tree/master/lessons/04_spreadout), _Spreading out: parabolic PDEs,_ where we study the numerical solution of diffusion problems.
In the first two notebooks, we looked at the 1D heat equation, and solved it numerically using [*explicit*](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_01_Heat_Equation_1D_Explicit.ipynb) and [*implicit*](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/04_spreadout/04_02_Heat_Equation_1D_Implicit.ipynb) schemes. We learned that implicit schemes are unconditionally stable, and we are free to choose any time step. —Wait: _any time step?_ Remember, we still want to capture the physics of the problem accurately. So although stability concerns do not limit the time step, it still has to be small enough to satisfy any accuracy concerns.
We are now ready to graduate to two dimensions! In the remaining lessons of this course module, we will study the 2D heat equation and reaction-diffusion equation. Like before, we start with explicit methods (this lesson) and then move to implicit methods (next lesson). Let's get started.
## 2D Heat conduction
The equation of heat conduction in 2D is:
$$
\begin{equation}
\rho c_p \frac{\partial T}{\partial t} = \frac{\partial}{\partial x} \left( \kappa_x \frac{\partial T}{\partial x} \right) + \frac{\partial}{\partial y} \left(\kappa_y \frac{\partial T}{\partial y} \right)
\end{equation}
$$
where $\rho$ is the density, $c_p$ is the heat capacity and $\kappa$ is the thermal conductivity.
If the thermal conductivity $\kappa$ is constant, then we can take it outside of the spatial derivative and the equation simplifies to:
$$
\begin{equation}
\frac{\partial T}{\partial t} = \alpha \left(\frac{\partial^2 T}{\partial x^2} + \frac{\partial^2 T}{\partial y^2} \right)
\end{equation}
$$
where $\alpha = \frac{\kappa}{\rho c_p}$ is the thermal diffusivity. The thermal diffusivity describes the ability of a material to conduct heat vs. storing it.
Does that equation have a familiar look to it? That's because it's the same as the diffusion equation. There's a reason that $\alpha$ is called the thermal *diffusivity*! We're going to set up an interesting problem where 2D heat conduction is important, and set about to solve it with explicit finite-difference methods.
### Problem statement
Removing heat out of micro-chips is a big problem in the computer industry. We are at a point in technology where computers can't run much faster because the chips might start failing due to the high temperature. This is a big deal! Let's study the problem more closely.
We want to understand how heat is dissipated from the chip with a very simplified model. Say we consider the chip as a 2D plate of size $1{\rm cm}\times 1{\rm cm}$, made of Silicon: $\kappa = 159{\rm W/m C}$, $c_p = 0.712\cdot 10^3 {\rm J/kg C}$, $\rho = 2329{\rm kg/m}^3$, and diffusivity $\alpha \approx 10^{-4}{\rm m}^2{/\rm s}$. Silicon melts at $1414{\rm C}$, but chips should of course operate at much smaller temperatures. The maximum temperature allowed depends on the processor make and model; in many cases, the maximum temperature is somewhere between $60{\rm C}$ and $\sim70{\rm C}$, but better CPUs are recommended to operate at a [maximum of $80{\rm C}$](http://www.pugetsystems.com/blog/2009/02/26/intel-core-i7-temperatures/) (like the Intel Core i7, for example).
We're going to set up a somewhat artificial problem, just to demonstrate an interesting numerical solution. Say the chip is in a position where on two edges (top and right) it is in contact with insulating material. On the other two edges the chip is touching other components that have a constant temperature of $T=100{\rm C}$ when the machine is operating. Initially, the chip is at room temperature $(20{\rm C})$. *How long does it take for the center of the chip to reach $70{\rm C}$?*
<img src='./figures/2dchip.svg' width='400px'>
#### Figure 1: Simplified microchip problem setup.
Let's use what we have learned to tackle this problem!
## 2D Finite differences
Everything you learned about finite-difference schemes in [Notebook 1 of Module 2](https://nbviewer.jupyter.org/github/numerical-mooc/numerical-mooc/blob/master/lessons/02_spacetime/02_01_1DConvection.ipynb) still applies, but now there are two spatial dimensions. We will need to build a 2D grid of discrete points to compute the solution on.
We will use a 2D Cartesian grid: one that consists of two families of (grid) lines parallel to the two spatial directions. Two lines (of different families) intersect on one and only one grid node (this is called a _structured_ grid). In the $x$ direction, the discretization uses $i=0, \cdots N_x$ lines, and in the $y$ direction we have $j=0, \cdots N_y$ lines. A given node on the grid will now have two spatial coordinates, and we need two indices: for the two lines that intersect at that node. For example, the middle point in the figure below would be $T_{i,j}$.
<img src="./figures/2dgrid.svg">
#### Figure 2. Nodal coordinates in 2 dimensions
### Explicit scheme in 2D
Recall from above that the 2D heat equation is
$$
\frac{\partial T}{\partial t} = \alpha \left(\frac{\partial^2 T}{\partial x^2} + \frac{\partial^2 T}{\partial y^2} \right)
$$
Let's write this out discretized using a forward difference in time and a central difference in space, i.e., an explicit scheme. You should be able to write this out yourself, without looking—if you need to look, it means you still need to write more difference equations by your own hand!
$$
\begin{equation}
\frac{T^{n+1}_{i,j} - T^n_{i,j}}{\Delta t} = \alpha \left( \frac{T^n_{i+1, j} - 2T^n_{i,j} + T^n_{i-1,j}}{\Delta x^2} + \frac{T^n_{i, j+1} - 2T^n_{i,j} + T^n_{i,j-1}}{\Delta y^2}\right)
\end{equation}
$$
Rearranging the equation to solve for the value at the next time step, $T^{n+1}_{i,j}$, yields
$$
\begin{equation}
T^{n+1}_{i,j}= T^n_{i,j} + \alpha \left( \frac{\Delta t}{\Delta x^2} (T^n_{i+1, j} - 2T^n_{i,j} + T^n_{i-1,j}) + \\\frac{\Delta t}{\Delta y^2} (T^n_{i, j+1} - 2T^n_{i,j} + T^n_{i,j-1})\right)
\end{equation}
$$
That's a little messier than 1D, but still recognizable.
Up until now, we've used stencils to help visualize how a scheme will advance the solution for one time step. Stencils in 2D are a little harder to draw, but hopefully the figure below will guide your understanding of this method: we are using five grid points at time step $n$ to obtain the solution on one point at time step $n+1$.
<img src="./figures/2d_stencil.svg">
#### Figure 3: 2D Explicit Stencil
Similar to all of the 1D explicit methods we've used, the solution at $T^{n+1}_{i,j}$ is updated using only known values from the current solution at time $n$. This is straightforward to implement in code, but will be subject to stability limitations on the time step that you can choose. We'll study an implicit method in the next lesson.
### Boundary Conditions
Whenever we reach a point that interacts with the boundary, we apply the boundary condition. As in the previous notebook, if the boundary has Dirichlet conditions, we simply impose the prescribed temperature at that point. If the boundary has Neumann conditions, we approximate them with a finite-difference scheme.
Remember, Neumann boundary conditions prescribe the derivative in the normal direction. For example, in the problem described above, we have $\frac{\partial T}{\partial y} = q_y$ in the top boundary and $\frac{\partial T}{\partial x} = q_x$ in the right boundary, with $q_y = q_x = 0$ (insulation).
Thus, at every time step, we need to enforce
$$
\begin{equation}
T_{i,end} = q_y\cdot\Delta y + T_{i,end-1}
\end{equation}
$$
and
$$
\begin{equation}
T_{end,j} = q_x\cdot\Delta x + T_{end-1,j}
\end{equation}
$$
Write the finite-difference discretization of the boundary conditions yourself, and confirm that you can get the expressions above.
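For reference, approximating the normal derivative at the top boundary with a first-order one-sided difference gives (the right boundary works the same way):
$$
\begin{equation}
\left.\frac{\partial T}{\partial y}\right|_{i,\text{end}} \approx \frac{T_{i,\text{end}} - T_{i,\text{end}-1}}{\Delta y} = q_y
\quad \Rightarrow \quad
T_{i,\text{end}} = q_y\cdot\Delta y + T_{i,\text{end}-1}
\end{equation}
$$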
### Stability
Before doing any coding, let's revisit stability constraints. We saw in the first notebook of this series that the 1D explicit discretization of the diffusion equation was stable as long as $\alpha \frac{\Delta t}{(\Delta x)^2} \leq \frac{1}{2}$. In 2D, this constraint is even tighter, as we need to add them in both directions:
$$
\begin{equation}
\alpha \frac{\Delta t}{(\Delta x)^2} + \alpha \frac{\Delta t}{(\Delta y)^2} < \frac{1}{2}.
\end{equation}
$$
Say that the mesh has the same spacing in $x$ and $y$, $\Delta x = \Delta y = \delta$. In that case, the stability condition is:
$$
\begin{equation}
\alpha \frac{\Delta t}{\delta^2} < \frac{1}{4}
\end{equation}
$$
## Code implementation
### Array storage
The physical problem has two dimensions, so we also store the temperatures in two dimensions: in a 2D array.
We chose to store it with the $y$ coordinates corresponding to the rows of the array and $x$ coordinates varying with the columns (this is just a code design decision!). If we are consistent with the stencil formula (with $x$ corresponding to index $i$ and $y$ to index $j$), then $T_{i,j}$ will be stored in array format as `T[j,i]`.
This might be a little confusing as most of us are used to writing coordinates in the format $(x,y)$, but our preference is to have the data stored so that it matches the physical orientation of the problem. Then, when we make a plot of the solution, the visualization will make sense to us, with respect to the geometry of our set-up. That's just nicer than having the plot rotated!
<img src="./figures/rowcolumn.svg" width="400px">
#### Figure 4: Row-column data storage
As you can see in Figure 4 above, if we want to access the value $18$ we would write its coordinates as $(x_2, y_3)$. You can also see that its location is row index 3, column index 2 (counting from zero), so its array address would be `T[3,2]`.
Again, this is a design decision. However, you can manipulate and store your data however you like; just remember to be consistent!
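As a quick sanity check of this convention, here is a tiny sketch with a made-up array (the numbers are not from the problem): rows index $y$ (with $j$) and columns index $x$ (with $i$), so the point $(x_i, y_j)$ lives at `T[j, i]`.
```
import numpy

# Made-up 4x5 array: 4 gridlines in y (rows), 5 gridlines in x (columns).
T = numpy.arange(20).reshape(4, 5)
print(T)
j, i = 3, 2          # the point (x_2, y_3)
print(T[j, i])       # row index 3, column index 2 -> 17 for this made-up array
```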
### Code time!
Now, to some coding! First, we have a little function that will advance the solution in time with a forward-time, centered-space scheme, and will monitor the center of the plate to tell us when it reaches $70{\rm C}$. Let's start by setting up our Python compute environment.
```
import numpy
from matplotlib import pyplot
%matplotlib inline
# Set the font family and size to use for Matplotlib figures.
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 16
def ftcs(T0, nt, dt, dx, dy, alpha):
"""
Computes and returns the temperature distribution
after a given number of time steps.
Explicit integration using forward differencing
in time and central differencing in space, with
Neumann conditions (zero-gradient) on top and right
boundaries and Dirichlet conditions on bottom and
left boundaries.
Parameters
----------
T0 : numpy.ndarray
The initial temperature distribution as a 2D array of floats.
nt : integer
Maximum number of time steps to compute.
dt : float
Time-step size.
dx : float
Grid spacing in the x direction.
dy : float
Grid spacing in the y direction.
alpha : float
Thermal diffusivity.
Returns
-------
T : numpy.ndarray
The temperature distribution as a 2D array of floats.
"""
# Define some constants.
sigma_x = alpha * dt / dx**2
sigma_y = alpha * dt / dy**2
# Integrate in time.
T = T0.copy()
ny, nx = T.shape
I, J = int(nx / 2), int(ny / 2) # indices of the center
for n in range(nt):
T[1:-1, 1:-1] = (T[1:-1, 1:-1] +
sigma_x * (T[1:-1, 2:] - 2.0 * T[1:-1, 1:-1] + T[1:-1, :-2]) +
sigma_y * (T[2:, 1:-1] - 2.0 * T[1:-1, 1:-1] + T[:-2, 1:-1]))
# Apply Neumann conditions (zero-gradient).
T[-1, :] = T[-2, :]
T[:, -1] = T[:, -2]
# Check if the center of the domain has reached T = 70C.
if T[J, I] >= 70.0:
break
print('[time step {}] Center at T={:.2f} at t={:.2f} s'
.format(n + 1, T[J, I], (n + 1) * dt))
return T
```
See the [`break`](https://docs.python.org/3/tutorial/controlflow.html) statement? It exits the `for` loop at the first time iteration at which the center of the plate reaches $70{\rm C}$.
In the code cell below, we define our initial conditions according to the problem setup and choose the discretization parameters. We start with only 20 spatial steps in each coordinate direction and advance for 500 time steps. You should later experiment with these parameters at your leisure!
```
# Set parameters.
Lx = 0.01 # length of the plate in the x direction
Ly = 0.01 # height of the plate in the y direction
nx = 21 # number of points in the x direction
ny = 21 # number of points in the y direction
dx = Lx / (nx - 1) # grid spacing in the x direction
dy = Ly / (ny - 1) # grid spacing in the y direction
alpha = 1e-4 # thermal diffusivity of the plate
# Define the locations along a gridline.
x = numpy.linspace(0.0, Lx, num=nx)
y = numpy.linspace(0.0, Ly, num=ny)
# Compute the initial temperature distribution.
Tb = 100.0 # temperature at the left and bottom boundaries
T0 = 20.0 * numpy.ones((ny, nx))
T0[0, :] = Tb
T0[:, 0] = Tb
```
We don't want our solution blowing up, so let's find a time step with $\frac{\alpha \Delta t}{\Delta x^2} = \frac{\alpha \Delta t}{\Delta y^2} = \frac{1}{4}$.
```
# Set the time-step size based on CFL limit.
sigma = 0.25
dt = sigma * min(dx, dy)**2 / alpha # time-step size
nt = 500 # number of time steps to compute
# Compute the temperature along the rod.
T = ftcs(T0, nt, dt, dx, dy, alpha)
```
### Visualize the results
By now, you're no doubt *very* familiar with the `pyplot.plot` command. It's great for line plots, scatter plots, etc., but what about when we have two spatial dimensions and another value (temperature) to display?
Are you thinking contour plot? We're thinking contour plot. Check out the documentation on [`pyplot.contourf`](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.contour) (the 'f' denotes "filled" contours).
```
# Plot the filled contour of the temperature.
pyplot.figure(figsize=(8.0, 5.0))
pyplot.xlabel('x [m]')
pyplot.ylabel('y [m]')
levels = numpy.linspace(20.0, 100.0, num=51)
contf = pyplot.contourf(x, y, T, levels=levels)
cbar = pyplot.colorbar(contf)
cbar.set_label('Temperature [C]')
pyplot.axis('scaled', adjustable='box');
```
That looks pretty cool! Note that in the call to `pyplot.contourf` you can specify the number of contour levels to display (we chose `51`). Look at that visualization: does it make physical sense to you, considering that the upper and right sides of the chip are insulated, in our problem?
##### Dig deeper
In the problem we just demonstrated, the chip reaches a temperature of $70{\rm C}$ at a given time, but will it keep increasing? That spells trouble.
Imagine that you have a heat sink instead of an insulator acting on the upper and right sides. What should be the heat flux that the heat sink achieves there, so that the temperature does not exceed $70{\rm C}$ at the center of the chip?
---
###### The cell below loads the style of the notebook
```
from IPython.core.display import HTML
css_file = '../../styles/numericalmoocstyle.css'
HTML(open(css_file, 'r').read())
```
| github_jupyter |
# A Basic Model
In this example application it is shown how a simple time series model can be developed to simulate groundwater levels. The recharge (calculated as precipitation minus evaporation) is used as the explanatory time series.
```
import matplotlib.pyplot as plt
import pandas as pd
import pastas as ps
ps.show_versions()
```
### 1. Importing the dependent time series data
In this code block a time series of groundwater levels is imported using the `read_csv` function of `pandas`. As `pastas` expects a `pandas` `Series` object, the data is squeezed. To check that you have the correct data type (a `pandas Series` object), you can use `type(gwdata)` as shown below.
The following characteristics are important when importing and preparing the observed time series:
- The observed time series are stored as a `pandas Series` object.
- The time step can be irregular.
```
# Import groundwater time series and squeeze to Series object
gwdata = pd.read_csv('../data/head_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
print('The data type of the oseries is: %s' % type(gwdata))
# Plot the observed groundwater levels
gwdata.plot(style='.', figsize=(10, 4))
plt.ylabel('Head [m]');
plt.xlabel('Time [years]');
```
### 2. Import the independent time series
Two explanatory series are used: the precipitation and the potential evaporation. These need to be `pandas Series` objects, as for the observed heads.
Important characteristics of these time series are:
- All series are stored as `pandas Series` objects.
- The series may have irregular time intervals, but then it will be converted to regular time intervals when creating the time series model later on.
- It is preferred to use the same length units as for the observed heads.
```
# Import observed precipitation series
precip = pd.read_csv('../data/rain_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
print('The data type of the precip series is: %s' % type(precip))
# Import observed evaporation series
evap = pd.read_csv('../data/evap_nb1.csv', parse_dates=['date'],
index_col='date', squeeze=True)
print('The data type of the evap series is: %s' % type(evap))
# Calculate the recharge to the groundwater
recharge = precip - evap
print('The data type of the recharge series is: %s' % type(recharge))
# Plot the time series of the precipitation and evaporation
plt.figure()
recharge.plot(label='Recharge', figsize=(10, 4))
plt.xlabel('Time [years]')
plt.ylabel('Recharge (m/year)');
```
### 3. Create the time series model
In this code block the actual time series model is created. First, an instance of the `Model` class is created (named `ml` here). Second, the different components of the time series model are created and added to the model. The imported time series are automatically checked for missing values and other inconsistencies. The keyword argument `fillnan` can be used to determine how missing values are handled. If any NaN values are found, this will be reported by `pastas`.
```
# Create a model object by passing it the observed series
ml = ps.Model(gwdata, name="GWL")
# Add the recharge data as explanatory variable
sm = ps.StressModel(recharge, ps.Gamma, name='recharge', settings="evap")
ml.add_stressmodel(sm)
```
### 4. Solve the model
The next step is to compute the optimal model parameters. The default solver uses a non-linear least squares method for the optimization. The python package `scipy` is used (info on `scipy's` least_squares solver can be found [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html)). Some standard optimization statistics are reported along with the optimized parameter values and correlations.
```
ml.solve()
```
### 5. Plot the results
The solution can be plotted after a solution has been obtained.
```
ml.plot()
```
### 6. Advanced plotting
There are many ways to further explore the time series model. `pastas` has some built-in functionalities that will provide the user with a quick overview of the model. The `plots` subpackage contains all the options. One of these is the method `plots.results` which provides a plot with more information.
```
ml.plots.results(figsize=(10, 6))
```
### 7. Statistics
The `stats` subpackage includes a number of statistical functions that may be applied to the model. One of them is the `summary` method, which gives a summary of the main statistics of the model.
```
ml.stats.summary()
```
### 8. Improvement: estimate evaporation factor
In the previous model, the recharge was estimated as precipitation minus potential evaporation. A better model is to estimate the actual evaporation as a factor (called the evaporation factor here) times the potential evaporation. First, a new model is created (called `ml2` here so that the original model `ml` does not get overwritten). Second, the `RechargeModel` object with a `Linear` recharge model is created, which combines the precipitation and evaporation series and adds a parameter for the evaporation factor `f`. The `RechargeModel` object is added to the model, the model is solved, and the results and statistics are plotted to the screen. Note that the new model gives a better fit (lower root mean squared error and higher explained variance), but that the Akaike information criterion indicates that the additional parameter does not improve the model significantly (the Akaike criterion for model `ml2` is higher than for model `ml`).
```
# Create a model object by passing it the observed series
ml2 = ps.Model(gwdata)
# Add the recharge data as explanatory variable
ts1 = ps.RechargeModel(precip, evap, ps.Gamma, name='rainevap',
recharge=ps.rch.Linear(), settings=("prec", "evap"))
ml2.add_stressmodel(ts1)
# Solve the model
ml2.solve()
# Plot the results
ml2.plot()
# Statistics
ml2.stats.summary()
```
### Origin of the series
* The rainfall data is taken from rainfall station Heibloem in The Netherlands.
* The evaporation data is taken from weather station Maastricht in The Netherlands.
* The head data is well B58C0698, which was obtained from Dino loket
| github_jupyter |
```
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
%matplotlib inline
from glob import glob
all_q = {}
x_dirs = glob('x/*/')
x_dirs[0].split('/')
'1qtable'.split('1')
for x_dir in x_dirs:
chain_length = x_dir.split('/')[1]
qtables = glob(f'{x_dir}{chain_length}*')
print(qtables)
all_q[chain_length] = {}
for qtable in qtables:
spacing = qtable.split(f'{x_dir}{chain_length}')[1].split('qtable')[0]
with open(qtable) as fp:
#The first 14 lines of the qTable do not contain spectrum data
print(qtable)
for blank in range(0,14):
fp.readline()
wave = []
Q_ext = []
Q_abs = []
Q_sca = []
for k in range(350,801):
line = fp.readline()
ary = line.split(" ")
ary = [a for a in ary if a]
# print(ary[1:5])
ary = np.array(ary[1:5]).astype(np.float)
wave.append(float(ary[0]))
Q_ext.append(float(ary[1]))
Q_abs.append(float(ary[2]))
Q_sca.append(float(ary[3]))
df = pd.DataFrame({'wave': wave, 'Q_ext': Q_ext, 'Q_abs': Q_abs, 'Q_sca': Q_sca})
all_q[chain_length][spacing] = df
from scipy.interpolate import UnivariateSpline
unreg = all_q['24']['1'].dropna()
spl = UnivariateSpline(unreg['wave'], unreg['Q_ext'])
wl = np.arange(0.350, 0.800, 0.001)
# inp = ((wl - w_mean)/w_std).reshape(-1, 1)
spl.set_smoothing_factor(0.00001)
preds = spl(wl)
plt.plot(all_q['24']['1']['wave'], all_q['24']['1']['Q_ext'], 'g')
plt.plot(wl, preds, 'b')
all_q['24']['1'].loc[all_q['24']['1']['Q_ext'].isnull(), 'Q_ext']
preds[all_q['24']['1']['Q_ext'].isnull()]
for n in all_q:
for spacing in all_q[n]:
df = all_q[n][spacing]
df_copy = df.dropna()
spl = UnivariateSpline(np.array(df_copy['wave']), np.array(df_copy['Q_abs']))
wl = np.arange(0.350, 0.800, 0.001)
spl.set_smoothing_factor(0.000001)
preds = spl(wl)
df.loc[df['Q_ext'].isnull(), 'Q_ext'] = preds[df['Q_ext'].isnull()]
all_q[n][spacing] = df
all_q['5']['1'][350:370]
df_list = {}
for n in all_q:
n_list = []
for spacing in all_q[n]:
cp = all_q[n][spacing].copy()
cp['spacing'] = float(spacing)
n_list.append(cp)
df = pd.concat(n_list, axis=0)
df_list[n] = df
df_list['3'].head()
formatted_df = {}
for n in df_list:
df = df_list[n]
new_df = pd.DataFrame()
for space in [1.0, 2.0, 3.0, 4.0]:
ser = df.loc[df['spacing'] == space, 'Q_ext']
if not ser.empty:
new_df[str(space)] = ser
formatted_df[n] = new_df
df = df_list['5']
new_df = pd.DataFrame()
for space in [1.0, 2.0, 3.0, 4.0]:
ser = df.loc[df['spacing'] == space, 'Q_ext']
if not ser.empty:
new_df[str(space)] = ser
df = formatted_df['5']
df[350:370]
for i in range(0, 451):
print(i)
print(df.loc[i])
from scipy import interpolate
x = {}
for n in range(2,31):
df = formatted_df[str(n)]
y = []
print(n)
for i in range(0, 451):
columns = np.array(df.columns).astype(np.float)
vals = np.array(df.loc[i])
f = interpolate.interp1d(columns, vals, kind='quadratic', fill_value='extrapolate')
df_out = f(np.arange(0.8, 4.05, 0.05))
y.append(df_out)
y = np.array(y)
x[n] = y
def mapper(inp):
return '%.2f' % (0.8 + 0.05 * float(inp))
final = {}
for n in x:
d = pd.DataFrame(x[n])
d = d.rename(columns=mapper)
wl_df = pd.DataFrame({'wl':np.arange(.350, .800, .001)})
out = wl_df.join(d)
print(out)
out.to_csv(f'x_{n}_new_interp.csv')
from scipy.interpolate import BivariateSpline
from scipy import interpolate
ones = df_list[0][df_list[0]['spacing'] == 1.0].dropna()
twos = df_list[0][df_list[0]['spacing'] == 2.0]
threes = df_list[0][df_list[0]['spacing'] == 3.0]
fours = df_list[0][df_list[0]['spacing'] == 4.0]
# spl = BivariateSpline(ones['wave'], ones['spacing'], ones['Q_abs'], s=0.000001)
# tck = interpolate.bisplrep(ones['wave'], ones['spacing'], ones['Q_abs'], s=0.1)
# znew = interpolate.bisplev(ones['wave'], ones['spacing'], tck)
# wl = np.arange(0.350, 0.800, 0.001)
# preds = spl(ones['wave'], ones['spacing'])
plt.plot(ones['wave'], ones['Q_abs'])
plt.plot(twos['wave'], twos['Q_abs'])
plt.plot(threes['wave'], threes['Q_abs'])
plt.plot(fours['wave'], fours['Q_abs'])
# plt.plot(ones['wave'], znew)
spl = UnivariateSpline([1.0, 2.0, 3.0, 4.0], [ones['Q_abs'][180], twos['Q_abs'][180], threes['Q_abs'][180], fours['Q_abs'][180]])
spl.set_smoothing_factor(0.01)
plt.plot([1.0, 2.0, 3.0, 4.0], [ones['Q_abs'][180], twos['Q_abs'][180], threes['Q_abs'][180], fours['Q_abs'][180]])
plt.plot([1.0, 2.0, 3.0, 4.0], [spl(1.0), spl(2.0), spl(3.0), spl(4.0)])
df_list[0]
```
| github_jupyter |
```
# import package
# installed via pip
from emtracks.particle import * # main solver object
from emtracks.conversions import one_gev_c2_to_kg # conversion for q factor (transverse momentum estimate)
from emtracks.tools import *#InitConds # initial conditions namedtuple
from emtracks.mapinterp import get_df_interp_func # factory function for creating Mu2e DS interpolation function
from emtracks.Bdist import get_B_df_distorted
from emtracks.interpolations import *
import matplotlib.animation as animation
import numpy as np
from scipy.constants import c, elementary_charge
import pandas as pd
import pickle as pkl
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import math
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams['figure.figsize'] = [24,16] # bigger figures
from matplotlib import style
style.use('fivethirtyeight')
import os
from joblib import Parallel, delayed
import multiprocessing
from tqdm.notebook import tqdm
rad13plotdir = '/home/shared_data/mao10,mao13_analysis/plots/mao13(0.90,1.10TS)rad/'
reg13plotdir = '/home/shared_data/mao10,mao13_analysis/plots/mao13(0.90,1.10TS)/'
mao13datadir = '/home/shared_data/mao10,mao13_analysis/data/mao13contourplots4/'
files = sorted(os.listdir(mao13datadir)) #all your files
#check initconds match with title theta/phi
asdf = []
for file in files:
e_solvernom = trajectory_solver.from_pickle(mao13datadir+file)
theta = float(file.split('_')[1])
phi = float(file.split('_')[2])
thetainitcond = round(e_solvernom.init_conds.theta0, 3)
phiinitcond = round(e_solvernom.init_conds.phi0, 3)
asdf.append([(theta-thetainitcond), (phi-phiinitcond)])
asdf = np.array(asdf)
asdf
asdf.mean(), asdf.std()
asdf.mean(), asdf.std()
e_solvernom = trajectory_solver.from_pickle(mao13datadir+files[500])
e_solvernom.dataframe
e_solvernom.init_conds.theta0
files[0].split('_')
bounce = True
files_new = []
for file in files:
if file[0:5] != '1.000':
files_new.append(file)
files = files_new
info = []
deleted = []
for file in files:
e_solvernom = trajectory_solver.from_pickle(mao13datadir+file)
field = file.split('_')[0]
phi = e_solvernom.init_conds.phi0
theta = e_solvernom.init_conds.theta0
if e_solvernom.dataframe.z.max() < 7.00:
bounce = 0
else:
bounce = 1
info.append([field, theta, phi, bounce])
df = pd.DataFrame(info, columns = ['field', 'theta', 'phi', 'bounce'])
df['field'].unique()
dfnew9 = df[df['field']=='0.90']
dfnew1 = df[df['field']=='1.00'] #want this bounce
dfnew11 = df[df['field']=='1.10']# want this not bounce
mask1 = (dfnew1.bounce == 1).values
mask2 = (dfnew11.bounce == 0).values
(mask1 & mask2).sum()
dfnow = dfnew1[mask1 & mask2]
dfnew1[mask1 & mask2]
def getDSfield(file):
    # theta value encoded in the filename (second underscore-separated field)
    return file.split('_')[1].split('x')[0]
def getPSfield(file):
    # phi value encoded in the filename (third underscore-separated field)
    return file.split('_')[2].split('x')[0]
def getfiles(files, field, thetas, phis):
fieldrounded = round(field, 3)
thetasrounded = [round(num, 3) for num in thetas]
phisrounded = [round(num, 3) for num in phis]
filedata = []
for file in files:
if np.isclose(float(file.split('_')[0]), field, 1e-5):
if float(getDSfield(file)) in thetasrounded:
if float(getPSfield(file)) in phisrounded:
filedata.append(file)
return filedata
filedata = getfiles(files, 1.00, dfnow['theta'], dfnow['phi'])
filedata2 = getfiles(files, 1.10, dfnow['theta'], dfnow['phi'])
tempfiles = filedata[0:3]
tempfiles2 = filedata2[0:3]
tempfiles
e_solvernom = trajectory_solver.from_pickle(mao13datadir+tempfiles[2])
e_solvernom2 = trajectory_solver.from_pickle(mao13datadir+tempfiles2[2])
e_solvernom.dataframe = e_solvernom.dataframe[::2]
e_solvernom2.dataframe = e_solvernom2.dataframe
fig, ax = e_solvernom.plot3d(cmap = 'Spectral')
fig, ax = e_solvernom2.plot3d(fig = fig, ax = ax)
e_solvernom.dataframe.z.max(), e_solvernom2.dataframe.z.max()
zees = {}
for field in df['field'].unique():
df2 = df[df['field']==field]
dfbounce = df2[(df2['bounce']==1) & (df2['field']==field)]
bounce = []
for i in range(0, len(dfbounce['theta'].values), 1):
bounce.append([dfbounce['theta'].values[i], dfbounce['phi'].values[i]]) #all pairs of [theta, phi] that bounce
thetas = np.array(df2['theta'].unique())
phis = np.array(df2['phi'].unique())
z = np.zeros((len(phis), len(thetas)))
for phi in range(0, len(phis), 1):
for theta in range(0, len(thetas), 1):
if [thetas[theta], phis[phi]] in bounce:
z[phi][theta] = 1
zees.update({f'{field}':z})
zees
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
fields = ['0.90', '0.91', '0.92', '0.93', '0.94', '0.95', '0.96', '0.97',
          '0.98', '0.99', '1.00', '1.01', '1.02', '1.05', '1.08', '1.10']
fig = plt.figure()
# one panel per TS field scale factor, laid out row by row on a 4x4 grid
for i, field in enumerate(fields):
    ax = plt.subplot2grid((4, 4), (i // 4, i % 4), rowspan=1, colspan=1)
    ax.contourf(thetas, phis, zees[field], cmap='inferno')   # scaled-field bounce map
    ax.contour(thetas, phis, zees['1.00'], cmap='viridis')   # nominal-field border overlay
    ax.set_title(field)
    ax.set_xlabel('theta (rad)')
    ax.set_ylabel('phi (rad)')
cmap = plt.cm.get_cmap('inferno')
rgba = cmap(0.0)
rgba2 = cmap(1.0)
bounces = mpatches.Patch(color=rgba, label = 'scaled not bounce')
notbounces = mpatches.Patch(color=rgba2, label = 'scaled bounce')
nomcmap = plt.cm.get_cmap('viridis')
rgba3 = nomcmap(1.0)
rgba4 = nomcmap(0.0)
overlay = Line2D([0], [0], color='lawngreen', lw = 2, label = 'nominal bounce border')
overlay2 = Line2D([0], [0], color='blue', lw = 2, label = 'nominal not bounce border')
fig.legend(handles = [notbounces, bounces, overlay, overlay2], ncol = 2)
fig.tight_layout(pad = 4.0)
fig.suptitle('Particles that Bounce in Different Distorted TS Field Scenarios', fontsize = '25')
zeees = {}
for field in df['field'].unique():
thetadif = (thetas[-1] - thetas[0])/(len(thetas))
phidif = (phis[-1] - phis[0])/(len(phis))
scaledthetas = []
scaledphis = []
for theta in thetas:
scaledthetas.append(theta-thetadif)
scaledthetas.append(thetas[-1] + thetadif)
for phi in phis:
scaledphis.append(phi-phidif)
scaledphis.append(phis[-1] + phidif)
zeees.update({f'{field}': [scaledthetas, scaledphis]})
fig = plt.figure()
# same 4x4 layout (reusing the fields list above), now with pcolormesh on the shifted theta/phi edges
for i, field in enumerate(fields):
    ax = plt.subplot2grid((4, 4), (i // 4, i % 4), rowspan=1, colspan=1)
    ax.pcolormesh(zeees[field][0], zeees[field][1], zees[field], cmap='inferno')
    ax.set_title(field)
    ax.set_xlabel('theta (rad)')
    ax.set_ylabel('phi (rad)')
cmap = plt.cm.get_cmap('inferno')
rgba = cmap(0.0)
rgba2 = cmap(1.0)
bounces = mpatches.Patch(color=rgba, label = 'not bounce')
notbounces = mpatches.Patch(color=rgba2, label = ' bounce')
fig.legend(handles = [notbounces, bounces])
fig.tight_layout(pad = 5.0)
fig.suptitle('Particles that Bounce in Different Distorted TS Field Scenarios', fontsize = '25')
```
| github_jupyter |
```
from IPython import display
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from utils import Logger
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
DATA_FOLDER = './tf_data/VGAN/MNIST'
IMAGE_PIXELS = 28*28
NOISE_SIZE = 100
BATCH_SIZE = 100
def noise(n_rows, n_cols):
return np.random.normal(size=(n_rows, n_cols))
def xavier_init(size):
in_dim = size[0] if len(size) == 1 else size[1]
stddev = 1. / np.sqrt(float(in_dim))
return tf.random_uniform(shape=size, minval=-stddev, maxval=stddev)
def images_to_vectors(images):
return images.reshape(images.shape[0], 784)
def vectors_to_images(vectors):
return vectors.reshape(vectors.shape[0], 28, 28, 1)
```
## Load Data
```
def mnist_data():
compose = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((.5,), (.5,))
])
out_dir = '{}/dataset'.format(DATA_FOLDER)
return datasets.MNIST(root=out_dir, train=True, transform=compose, download=True)
# Load data
data = mnist_data()
# Create loader with data, so that we can iterate over it
data_loader = DataLoader(data, batch_size=BATCH_SIZE, shuffle=True)
# Num batches
num_batches = len(data_loader)
```
## Initialize Graph
```
## Discriminator
# Input
X = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
# Layer 1 Variables
D_W1 = tf.Variable(xavier_init([784, 1024]))
D_B1 = tf.Variable(xavier_init([1024]))
# Layer 2 Variables
D_W2 = tf.Variable(xavier_init([1024, 512]))
D_B2 = tf.Variable(xavier_init([512]))
# Layer 3 Variables
D_W3 = tf.Variable(xavier_init([512, 256]))
D_B3 = tf.Variable(xavier_init([256]))
# Out Layer Variables
D_W4 = tf.Variable(xavier_init([256, 1]))
D_B4 = tf.Variable(xavier_init([1]))
# Store Variables in list
D_var_list = [D_W1, D_B1, D_W2, D_B2, D_W3, D_B3, D_W4, D_B4]
## Generator
# Input
Z = tf.placeholder(tf.float32, shape=(None, NOISE_SIZE))
# Layer 1 Variables
G_W1 = tf.Variable(xavier_init([100, 256]))
G_B1 = tf.Variable(xavier_init([256]))
# Layer 2 Variables
G_W2 = tf.Variable(xavier_init([256, 512]))
G_B2 = tf.Variable(xavier_init([512]))
# Layer 3 Variables
G_W3 = tf.Variable(xavier_init([512, 1024]))
G_B3 = tf.Variable(xavier_init([1024]))
# Out Layer Variables
G_W4 = tf.Variable(xavier_init([1024, 784]))
G_B4 = tf.Variable(xavier_init([784]))
# Store Variables in list
G_var_list = [G_W1, G_B1, G_W2, G_B2, G_W3, G_B3, G_W4, G_B4]
def discriminator(x):
l1 = tf.nn.dropout(tf.nn.leaky_relu(tf.matmul(x, D_W1) + D_B1, .2), .3)
l2 = tf.nn.dropout(tf.nn.leaky_relu(tf.matmul(l1, D_W2) + D_B2, .2), .3)
l3 = tf.nn.dropout(tf.nn.leaky_relu(tf.matmul(l2, D_W3) + D_B3, .2), .3)
out = tf.matmul(l3, D_W4) + D_B4
return out
def generator(z):
l1 = tf.nn.leaky_relu(tf.matmul(z, G_W1) + G_B1, .2)
l2 = tf.nn.leaky_relu(tf.matmul(l1, G_W2) + G_B2, .2)
l3 = tf.nn.leaky_relu(tf.matmul(l2, G_W3) + G_B3, .2)
out = tf.nn.tanh(tf.matmul(l3, G_W4) + G_B4)
return out
G_sample = generator(Z)
D_real = discriminator(X)
D_fake = discriminator(G_sample)
# Losses
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))
# Optimizers
D_opt = tf.train.AdamOptimizer(2e-4).minimize(D_loss, var_list=D_var_list)
G_opt = tf.train.AdamOptimizer(2e-4).minimize(G_loss, var_list=G_var_list)
```
## Train
#### Testing
```
num_test_samples = 16
test_noise = noise(num_test_samples, NOISE_SIZE)
```
#### Inits
```
num_epochs = 200
# Start interactive session
session = tf.InteractiveSession()
# Init Variables
tf.global_variables_initializer().run()
# Init Logger
logger = Logger(model_name='DCGAN1', data_name='CIFAR10')
```
#### Train
```
# Iterate through epochs
for epoch in range(num_epochs):
for n_batch, (batch,_) in enumerate(data_loader):
# 1. Train Discriminator
X_batch = images_to_vectors(batch.permute(0, 2, 3, 1).numpy())
feed_dict = {X: X_batch, Z: noise(BATCH_SIZE, NOISE_SIZE)}
_, d_error, d_pred_real, d_pred_fake = session.run(
[D_opt, D_loss, D_real, D_fake], feed_dict=feed_dict
)
# 2. Train Generator
feed_dict = {Z: noise(BATCH_SIZE, NOISE_SIZE)}
_, g_error = session.run(
[G_opt, G_loss], feed_dict=feed_dict
)
if n_batch % 100 == 0:
display.clear_output(True)
# Generate images from test noise
test_images = session.run(
G_sample, feed_dict={Z: test_noise}
)
test_images = vectors_to_images(test_images)
# Log Images
logger.log_images(test_images, num_test_samples, epoch, n_batch, num_batches, format='NHWC');
# Log Status
logger.display_status(
epoch, num_epochs, n_batch, num_batches,
d_error, g_error, d_pred_real, d_pred_fake
)
```
| github_jupyter |
## Omega and Xi
To implement Graph SLAM, a matrix and a vector (omega and xi, respectively) are introduced. The matrix is square and labelled with all the robot poses (x0, x1, ...) and all the landmarks (L0, L1, ...). Every time you make an observation, for example when you move between two poses by some distance `dx` and can therefore relate those two positions, you can represent that as a numerical relationship in omega and xi.
It's easiest to see how these work in an example. Below you can see a matrix representation of omega and a vector representation of xi.
<img src='images/omega_xi.png' width=20% height=20% />
Next, let's look at a simple example that relates 3 poses to one another.
* When you start out in the world most of these values are zeros or contain only values from the initial robot position
* In this example, you have been given constraints, which relate these poses to one another
* Constraints translate into matrix values
<img src='images/omega_xi_constraints.png' width=70% height=70% />
If you have ever solved linear systems of equations before, this may look familiar, and if not, let's keep going!
### Solving for x
To "solve" for all these x values, we can use linear algebra; all the values of x are in the vector `mu` which can be calculated as a product of the inverse of omega times xi.
<img src='images/solution.png' width=30% height=30% />
---
**You can confirm this result for yourself by executing the math in the cell below.**
```
import numpy as np
# define omega and xi as in the example
omega = np.array([[1,0,0],
[-1,1,0],
[0,-1,1]])
xi = np.array([[-3],
[5],
[3]])
# calculate the inverse of omega
omega_inv = np.linalg.inv(np.matrix(omega))
# calculate the solution, mu
mu = omega_inv*xi
# print out the values of mu (x0, x1, x2)
print(mu)
```
## Motion Constraints and Landmarks
In the last example, the constraint equations relating one pose to another were given to you. In this next example, let's look at how motion (and, similarly, sensor measurements) can be used to create constraints and fill up the constraint matrices, omega and xi. Let's start with empty/zero matrices.
<img src='images/initial_constraints.png' width=35% height=35% />
This example also includes relationships between poses and landmarks. Say we move from x0 to x1 with a displacement `dx` of 5. Then we have created a motion constraint that relates x0 to x1, and we can start to fill up these matrices.
<img src='images/motion_constraint.png' width=50% height=50% />
In fact, this one constraint equation can be written in two ways (solved for x0 or for x1). So the motion constraint that relates x0 and x1 by a motion of 5 affects the matrix, adding values to *all* the elements that correspond to x0 and x1.
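As a minimal sketch of that update (assuming the symmetric convention, and ignoring the initial-position constraint for brevity), a single 1-D motion constraint `x1 - x0 = dx` touches the four omega cells indexed by x0 and x1 and the two corresponding entries of xi:
```
import numpy as np

# Empty constraint matrices for 3 poses in the 1-D case
omega = np.zeros((3, 3))
xi = np.zeros((3, 1))

def add_motion_constraint(omega, xi, i, j, dx):
    # The constraint x_j - x_i = dx updates the four cells (i,i), (i,j), (j,i), (j,j) ...
    omega[i, i] += 1
    omega[j, j] += 1
    omega[i, j] -= 1
    omega[j, i] -= 1
    # ... and the two matching entries of xi
    xi[i, 0] -= dx
    xi[j, 0] += dx

# Move from x0 to x1 by dx = 5
add_motion_constraint(omega, xi, 0, 1, 5)
print(omega)
print(xi)
```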
### 2D case
In these examples, we've been showing change in only one dimension, the x-dimension. In the project, it will be up to you to represent x and y positional values in omega and xi. One solution could be to create an omega and xi that are 2x larger, so that they can hold both x and y values for poses; a rough sketch of that idea follows below. I might suggest drawing out a rough solution to Graph SLAM as you read the instructions in the next notebook; that always helps me organize my thoughts. Good luck!
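Here is a minimal sketch of that 2x-larger layout, assuming the x and y entries of each pose and landmark are interleaved and the initial pose is anchored at the centre of the world (the helper name `initialize_constraints` and its arguments are illustrative assumptions, not the project's required signature):
```
import numpy as np

def initialize_constraints(N, num_landmarks, world_size):
    # One row/column for the x value and one for the y value of every pose and landmark
    size = 2 * (N + num_landmarks)
    omega = np.zeros((size, size))
    xi = np.zeros((size, 1))
    # Constrain the initial pose (x0, y0) to the middle of the world with strength 1
    omega[0, 0] = 1.0
    omega[1, 1] = 1.0
    xi[0, 0] = world_size / 2.0
    xi[1, 0] = world_size / 2.0
    return omega, xi

omega, xi = initialize_constraints(N=3, num_landmarks=2, world_size=100.0)
print(omega.shape, xi.shape)  # (10, 10) (10, 1)
```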
| github_jupyter |
# Chapter 3-2 Multiple Linear Regression
Concepts and data from "An Introduction to Statistical Learning, with applications in R" (Springer, 2013), with permission from the authors: G. James, D. Witten, T. Hastie and R. Tibshirani, available at [www.StatLearning.com](http://www.StatLearning.com).
For Tables reference see [http://data8.org/datascience/tables.html](http://data8.org/datascience/tables.html)
```
# HIDDEN
# For Tables reference see http://data8.org/datascience/tables.html
# This useful nonsense should just go at the top of your notebook.
from datascience import *
%matplotlib inline
import matplotlib.pyplot as plots
import numpy as np
from sklearn import linear_model
plots.style.use('fivethirtyeight')
plots.rc('lines', linewidth=1, color='r')
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# datascience version number of last run of this notebook
version.__version__
import sys
sys.path.append("..")
from ml_table import ML_Table
import locale
locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )
# Getting the data
advertising = ML_Table.read_table("./data/Advertising.csv")
advertising = advertising.drop(0)
advertising
```
## 3.2.1 Estimating the Regression Coefficients
The multiple linear regression model takes the form
$Y = β_0 + β_1 X_1 + \cdots + β_{p}X_{p} + ε$,
where $X_j$ represents the $j$th predictor and $β_j$ quantifies the association between that variable and the response. We interpret $β_j$ as the average effect on $Y$ of a one-unit increase in $X_j$, holding all other predictors fixed.
In the advertising example, this becomes
$sales = β_0 + β_1×TV + β_2×radio + β_3×newspaper + ε$.
```
advertising.linear_regression('Sales').params
adver_model = advertising.linear_regression('Sales').model
adver_model(0,0,0)
```
### Visualizing a 2D regression
```
ad2 = advertising.drop('Newspaper')
ad2.linear_regression('Sales').summary()
# Linear model with two input variables is a plane
ad2.plot_fit('Sales', ad2.linear_regression('Sales').model, width=8)
```
### Multiple regression inference and goodness of fit
At this point "ISL" skips over how to compute the standard error of the multiple regression parameters - relying on R to just produce the answer. It requires some matrix notation and a numerical computation of the matrix inverse, but involves a bunch of standard terminology that is specific to the inference aspect, as opposed to the general notion in linear algebra of approximating a function over a basis.
A nice treatment can be found at this [reference](http://dept.stat.lsa.umich.edu/~kshedden/Courses/Stat401/Notes/401-multreg.pdf)
```
# response vector
Y = advertising['Sales']
labels = [lbl for lbl in advertising.labels if lbl != 'Sales']
p = len(labels) # number of parameters (excluding the intercept)
n = len(Y) # number of observations
labels
# Transform the table into a matrix
advertising.select(labels).rows
# Design matrix
X = np.array([np.append([1], row) for row in advertising.select(labels).rows])
# coefficient vector (intercept followed by the slopes)
b0, slopes = advertising.linear_regression('Sales').params
b = np.append([b0], slopes)
np.shape(X), np.shape(b)
# residual
res = np.dot(X, b) - advertising['Sales']
# Variance of the residual
sigma2 = sum(res**2)/(n-p-1)
sigma2
Xt = np.transpose(X)
# The matrix that needs to be inverted is only (p+1) x (p+1)
np.dot(Xt, X)
np.shape(np.dot(Xt, X))
# standard error matrix
SEM = sigma2*np.linalg.inv(np.dot(Xt, X))
SEM
# variance of the coefficients are the diagonal elements
variances = [SEM[i,i] for i in range(len(SEM))]
variances
# standard error of the coefficients
SE = [np.sqrt(v) for v in variances]
SE
# t-statistics
b/SE
advertising.linear_regression('Sales').summary()
advertising.RSS_model('Sales', adver_model)
advertising.R2_model('Sales', adver_model)
```
## 3.2.2 Some Important questions
1. Is at least one of the predictors $X_1, X_2, \ldots, X_p$ useful in predicting the response?
2. Do all the predictors help to explain $Y$, or is only a subset of the predictors useful?
3. How well does the model fit the data?
4. Given a set of predictor values, what response value should we predict, and how accurate is our prediction?
### Correlation matrix
The regression summary above shows that spending on newspaper appears to have no effect on sales. The apparent effect when looking at newspaper versus sales in isolation is really capturing the tendency to spend more on newspaper when spending more on radio.
```
advertising.Cor()
```
### F-statistic
$F = \frac{(TSS - RSS)/p}{RSS/(n - p - 1)}$
When there is no relationship between the response and predictors, one would expect the F-statistic to take on a value close to 1. On the other hand, if $H_a$ is true, then $E\{(TSS − RSS)/p\} > σ^2$, so we expect F to be greater than 1.
```
advertising.F_model('Sales', adver_model)
advertising.lm_fit('Sales', adver_model)
# Using this tool for the 1D model within the table
advertising.lm_fit('Sales', advertising.regression_1d('Sales', 'TV'), 'TV')
```
Sometimes we want to test that a particular subset of q of the coefficients are zero. This corresponds to a null hypothesis
$H_0: β_{p−q+1} = β_{p−q+2} = \cdots = β_{p} = 0$
where for convenience we have put the variables chosen for omission at the end of the list. In this case we fit a second model that uses all the variables except those last q. Suppose that the residual sum of squares for that model is $RSS_0$. Then the appropriate F-statistic is
$F = \frac{(RSS_0 − RSS)/q}{RSS/(n−p−1)}$.
```
ad2_model = ad2.linear_regression('Sales').model
ad2.lm_fit('Sales', ad2_model)
RSS0 = ad2.RSS_model('Sales', ad2_model)
RSS = advertising.RSS_model('Sales', adver_model)
# F-statistic for dropping Newspaper (q = 1), following the formula above
((RSS0 - RSS)/1)/(RSS/(advertising.num_rows - 3 - 1))
```
## Variable selection
* *Forward selection* - start with the null model and add predictors one at a time, choosing the variable that results in the lowest RSS (a full greedy loop is sketched after the cell below)
* *Backward selection* - start with all variables and iteratively remove the one with the largest P-value (smallest t-statistic)
* *Mixed selection* - add variables as in forward selection, but remove any whose P-value becomes too high
```
input_labels = [lbl for lbl in advertising.labels if lbl != 'Sales']
fwd_labels = ['Sales']
for lbl in input_labels:
fwd = advertising.select(fwd_labels + [lbl])
model = fwd.linear_regression('Sales').model
print(lbl, fwd.RSS_model('Sales', model))
```
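As referenced above, the cell above only scores the first candidate variable. A rough sketch of iterating that greedy step until every predictor has been added (reusing the `ML_Table` helpers from this notebook; running to exhaustion rather than applying a stopping rule is an assumption for illustration):
```
# Greedy forward selection: repeatedly add the predictor that lowers RSS the most
remaining = [lbl for lbl in advertising.labels if lbl != 'Sales']
selected = []
while remaining:
    best_lbl, best_rss = None, None
    for lbl in remaining:
        candidate = advertising.select(['Sales'] + selected + [lbl])
        model = candidate.linear_regression('Sales').model
        rss = candidate.RSS_model('Sales', model)
        if best_rss is None or rss < best_rss:
            best_lbl, best_rss = lbl, rss
    selected.append(best_lbl)
    remaining.remove(best_lbl)
    print(selected, best_rss)
```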
| github_jupyter |
```
import numpy as np
jpt_peptides_file_name = "jpt_sequences.txt"
jpt_mgf_file_name = "jpt_predicted_isoforms_nofixprop.mgf"
uniprot_proteins_file_name = "uniprot_histones.txt"
uniprot_mgf_file_name = "uniprot_predicted_isoforms_nofixprop.mgf"
msp_predictions_file_name = "M_Human_Histones_output_predictions.msp"
proton_mass = 1.007276
variable_ptms = {
# "M": ["M_ox"],
}
fixed_ptms = {
# "n": ["n_pr"],
# "K": ["K_pr"],
}
jpt_ptm_dict = {
'Lys(Biotinoyl)': "K_bio",
'pS': "S_ph",
'Cit': "R_cit",
'Lys(Ac)': "K_ac",
'pT': "T_ph",
'Lys(Me2)': "K_me2",
'Lys(Me)': "K_me",
'Arg(Me2a)': "R_me2a",
'Lys(But)': "K_bu",
'Arg(Me2s)': "R_me2s",
'Lys(prop)': "K_pr",
'Lys(Biotin)': "K_bio",
'Arg(Me)': "R_me",
'Lys(Me3)': "K_me3",
'Gln(Me)':"Q_me",
'Ac': "n_ac",
'H': "n_h",
'NH2': "c_nh2",
# 'Ser(ß_D_GlcNAc)': "S_glcnac", # not used ???
# 'Ttds': "X_x", # ???
# '': "X_x", # terminal ???
}
uniprot_ptm_dict = {
'N,N,N-trimethylglycine': "G_me3",
'Phosphoserine': "S_ph",
'Deamidated asparagine': "N_deam",
'Citrulline': "R_cit",
'N6-acetyllysine': "K_ac",
'Phosphothreonine': "T_ph",
'N6-crotonyllysine': "K_cr",
'N6-methyllysine': "K_me",
'N6-succinyllysine': "K_su",
'Symmetric dimethylarginine': "R_me2s",
'N5-methylglutamine': "Q_me",
'Phosphotyrosine': "T_ph",
'N6,N6-dimethyllysine': "K_me2",
'Dimethylated arginine': "R_me2",
'N6,N6,N6-trimethyllysine': "K_me3",
'Omega-N-methylarginine': "R_me",
'N6-methylated lysine': "K_me",
'N6-butyryllysine': "K_bu",
'N6-malonyllysine': "K_ma",
'Asymmetric dimethylarginine': "R_me2a",
'N6-propionyllysine': "K_pr",
'ADP-ribosylserine': "S_ar", # ???
'N6-glutaryllysine': "K_gl", # ???
'N6-(beta-hydroxybutyryl)lysine': "K_Hib", # ???
'N6-(2-hydroxyisobutyryl)lysine': "K_Hib2", # ???
# 'N-acetylmethionine': "M_ac", # protein n-terminal ???
# 'N-acetylthreonine': "T_ac", # protein n-terminal ???
# 'N-acetylserine': "S_ac", # protein n-terminal ???
# 'N-acetylproline': "P_ac", # protein n-terminal ???
# 'Allysine': "", ?
}
ptm_mass_dict = {
'G_me3': 42.0469,
'K_Hib': 86.0368,
'K_Hib2': 86.0368,
'K_ac': 42.010565,
'K_bio': 226.077598,
'K_bu': 70.0419,
'K_cr': 68.026215,
'K_gl': 95.07636,
'K_ma': 86.0004,
'K_me': 14.01565,
'K_me_pr': 14.01565 + 56.026215,
'K_me2': 28.0313,
'K_me3': 42.0469,
'K_pr': 56.026215,
'K_su': 100.0160,
'N_deam': -0.984016,
"M_ox": 0, # TODO ???
'Q_me': 14.01565,
'R_cit': 0.984016,
'R_me': 14.01565,
'R_me2': 28.0313,
'R_me2a': 28.0313,
'R_me2s': 28.0313,
'S_ar': 541.06111,
'S_ph': 79.966331,
'T_ph': 79.966331,
"c_nh2": 0, # TODO ???
"n_ac": 42.010565,
"n_h": 0,
'n_pr': 56.026215,
"": 0,
}
def update_methyl_mass_to_butyryl(ptm_mass_dict):
# TODO
ptm_mass_dict["K_me"] = ptm_mass_dict["K_bu"]
def read_jpt_sequences_and_ptms(jpt_pep_file_name, jpt_ptm_dict):
print(f"Reading JPT peptides from {jpt_pep_file_name}")
jpt_sequences = ""
jpt_ptms = []
with open(jpt_pep_file_name, "r") as infile:
for sequence_line in infile:
sequence_line = sequence_line.strip()
n_term, *sequence_parts, c_term = sequence_line.split("-")
aa, ptm = jpt_ptm_dict[n_term].split("_")
sequence = aa
ptms = [[f"{aa}_{ptm}"]]
for sequence_part in sequence_parts:
if sequence_part.isupper():
sequence += sequence_part
ptms += [[] for i in enumerate(sequence_part)]
else:
try:
aa, ptm = jpt_ptm_dict[sequence_part].split("_")
except KeyError:
print(f"Ignoring peptide {sequence_line} with unknown PTM {sequence_part}")
break
sequence += aa
ptms += [[f"{aa}_{ptm}"]]
else:
aa, ptm = jpt_ptm_dict[c_term].split("_")
sequence += aa
ptms += [[f"{aa}_{ptm}"]]
jpt_sequences += sequence
jpt_ptms += ptms
return jpt_sequences, jpt_ptms
def read_uniprot_sequences_and_ptms(uniprot_proteins_file_name, uniprot_ptm_dict):
print(f"Reading UniProt proteins from {uniprot_proteins_file_name}")
uniprot_sequences = ""
uniprot_ptms = []
with open(uniprot_proteins_file_name, "r") as infile:
for line in infile:
if line.startswith("ID"):
data = line.split()
protein_name = data[1]
protein_length = int(data[3])
sequence = ""
ptms = [[] for i in range(protein_length + 2)]
elif line.startswith("FT"):
if "MOD_RES" in line:
location = int(line.split()[-1])
ptm = next(infile).split('"')[1].split(";")[0]
try:
parsed_ptm = uniprot_ptm_dict[ptm]
ptms[location].append(parsed_ptm)
except KeyError:
print(f"Ignoring unknown PTM {ptm} at {location} on {protein_name}")
elif line.startswith("SQ"):
for line in infile:
if line.startswith("//"):
uniprot_sequences += "n" + "".join(sequence.split()) + "c"
uniprot_ptms += ptms
break
sequence += line
return uniprot_sequences, uniprot_ptms
def read_predicted_spectra(msp_predictions_file_name):
print(f"Reading predicted spectra from {msp_predictions_file_name}")
spectra = {}
with open(msp_predictions_file_name, "r") as infile:
for line in infile:
if line.startswith("Name"):
sequence, charge = line.split()[-1].split("/")
charge = int(charge)
if sequence not in spectra:
spectra[sequence] = {
"charges": {},
"b_mzs": np.zeros(len(sequence)),
"y_mzs": np.zeros(len(sequence)),
}
spectra[sequence]["charges"][charge] = {
"b_intensities": np.zeros(len(sequence)),
"y_intensities": np.zeros(len(sequence)),
}
elif line.startswith("MW"):
spectra[sequence]["mw"] = float(line.split()[-1])
elif line.startswith("Comment:"):
spectra[sequence]["proteins"] = (line.split()[3]).split('"')[1]
elif line[0].isdigit():
mz, intensity, annotation = line.split()
ion_type = annotation[1]
location = int(annotation[2:-1])
if ion_type == "b":
spectra[sequence]["b_mzs"][location] = mz
spectra[sequence]["charges"][charge]["b_intensities"][location] = intensity
elif ion_type == "y":
spectra[sequence]["y_mzs"][location] = mz
spectra[sequence]["charges"][charge]["y_intensities"][location] = intensity
return spectra
def generate_sequence_indices(query_sequence, reference_sequence):
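    # Yield every start index at which query_sequence occurs in reference_sequence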
i = reference_sequence.find(query_sequence)
while i != -1:
yield i
i = reference_sequence.find(query_sequence, i + 1)
def generate_ptm_combinations_recursively(ptms, selected=[]):
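    # Depth-first expansion: yield every combination that picks one PTM option per position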
if len(selected) == len(ptms):
yield selected
return
for ptm in ptms[len(selected)]:
for ptm_combination in generate_ptm_combinations_recursively(ptms, selected + [ptm]):
yield ptm_combination
def generate_ptm_combinations(
sequence,
ptms,
variable_ptms,
fixed_ptms,
static_ptms
):
local_ptms = [[] for i in sequence]
if sequence[0] == "n":
local_ptms[0] += ptms[0]
if sequence[-1] == "c":
local_ptms[-1] = ptms[-1]
for i, ptm in enumerate(ptms[1:-1]):
local_ptms[i + 1] += ptm
for i, aa in enumerate(f"n{sequence[1:-1]}c"):
if (not static_ptms) or (len(local_ptms[i]) == 0):
if aa in variable_ptms:
local_ptms[i] += variable_ptms[aa]
if aa in fixed_ptms:
local_ptms[i] += fixed_ptms[aa]
else:
local_ptms[i].append("")
for ptm_combination in generate_ptm_combinations_recursively(local_ptms):
yield ptm_combination
def write_isoforms_to_mgf(
mgf_file_name,
query_sequences,
query_ptms,
static_ptms,
):
print(f"Writing predicted isoforms to {mgf_file_name}")
with open(mgf_file_name, "w") as mgf_file:
for sequence in spectra:
proteins = spectra[sequence]["proteins"]
mw = spectra[sequence]["mw"]
sequence_length = len(sequence)
for i in generate_sequence_indices(sequence, query_sequences):
for ptm_combination in generate_ptm_combinations(
query_sequences[i - 1: i + sequence_length + 1],
query_ptms[i - 1: i + sequence_length + 1],
variable_ptms,
fixed_ptms,
static_ptms
):
mass_shifts = np.array([ptm_mass_dict[ptm] for ptm in ptm_combination])
new_b_mzs = np.cumsum(mass_shifts[:-2]) + spectra[sequence]["b_mzs"]
new_y_mzs = np.cumsum(mass_shifts[::-1][:-2]) + spectra[sequence]["y_mzs"]
ptms = ";".join(
[
f"{ptm}@{i}" for i, ptm in enumerate(ptm_combination) if ptm != ""
]
)
for charge in spectra[sequence]["charges"]:
mgf_file.write("BEGIN IONS\n")
mgf_file.write(f"TITLE={proteins} {sequence} {ptms}\n")
mgf_file.write(f"PEPMASS={(mw + np.sum(mass_shifts) + charge * proton_mass) / charge}\n")
b_intensities = spectra[sequence]["charges"][charge]["b_intensities"]
y_intensities = spectra[sequence]["charges"][charge]["y_intensities"]
mgf_file.write(f"CHARGE={charge}+\n")
mzs = np.concatenate([new_b_mzs, new_y_mzs])
intensities = np.concatenate([b_intensities, y_intensities])
order = np.argsort(mzs)
for mz, intensity in zip(mzs[order], intensities[order]):
if intensity != 0:
mgf_file.write(f"{mz:.4f} {intensity}\n")
mgf_file.write("END IONS\n")
mgf_file.write("\n")
if ("K" in fixed_ptms) and ("K_pr" in fixed_ptms["K"]):
update_methyl_mass_to_butyryl(ptm_mass_dict)
jpt_sequences, jpt_ptms = read_jpt_sequences_and_ptms(
jpt_peptides_file_name,
jpt_ptm_dict
)
uniprot_sequences, uniprot_ptms = read_uniprot_sequences_and_ptms(
uniprot_proteins_file_name,
uniprot_ptm_dict
)
spectra = read_predicted_spectra(msp_predictions_file_name)
write_isoforms_to_mgf(
jpt_mgf_file_name,
jpt_sequences,
jpt_ptms,
True,
)
write_isoforms_to_mgf(
uniprot_mgf_file_name,
uniprot_sequences,
uniprot_ptms,
False,
)
```
| github_jupyter |
## Stack - Data Science Bootcamp
### Machine Learning.
```
import pandas as pd
import datetime
import glob
from minio import Minio
import numpy as np
import matplotlib.pyplot as plt
client = Minio(
"localhost:9000",
access_key="minioadmin",
secret_key="minioadmin",
secure=False
)
```
### Downloading the dataset from the Data Lake.
```
client.fget_object(
"processing",
"employees_dataset.parquet",
"temp_.parquet",
)
df = pd.read_parquet("temp_.parquet")
df.head()
```
#### Organizing the dataset.
```
df = df[['department', 'salary', 'mean_work_last_3_months',
'number_projects', 'satisfaction_level', 'last_evaluation',
'time_in_company', 'work_accident','left']]
df.head()
```
#### Checking for missing records.
```
df.isnull().sum()
df[df.notnull()]
df = df[:14998]
```
#### Converting the data types.
```
df["number_projects"] = df["number_projects"].astype(int)
df["mean_work_last_3_months"] = df["mean_work_last_3_months"].astype(int)
df["time_in_company"] = df["time_in_company"].astype(int)
df["work_accident"] = df["work_accident"].astype(int)
df["left"] = df["left"].astype(int)
df.info()
df.head()
df = df[:14998]
```
#### Renaming attributes
```
df = df.rename(columns={'satisfaction_level': 'satisfaction',
'last_evaluation': 'evaluation',
'number_projects': 'projectCount',
'mean_work_last_3_months': 'averageMonthlyHours',
'time_in_company': 'yearsAtCompany',
'work_accident': 'workAccident',
'left' : 'turnover'
})
df.head()
```
### Feature importance
#### Converting the attributes to categorical codes.
```
df["department"] = df["department"].astype('category').cat.codes
df["salary"] = df["salary"].astype('category').cat.codes
df.head()
```
#### Separating the data into features and target.
```
target_name = 'turnover'
X = df.drop('turnover', axis=1)
y = df[target_name]
```
#### Transforming the data.
```
# Scale the feature values to a minimum of 0 and a maximum of 1.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
X
```
#### Splitting into train and test sets.
```
# Split the data into training and test sets.
from sklearn.model_selection import train_test_split
# 20% held out for testing the model
X_train, X_test, y_train, y_test = train_test_split(
X
,y
,test_size = 0.2
,random_state = 123
,stratify = y # keeps the balance between the classes (turnover 1 and 0) when splitting the data
)
```
#### Training the decision tree algorithm.
```
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X_train,y_train)
importances = dtree.feature_importances_
feat_names = df.drop(['turnover'],axis=1).columns
indices = np.argsort(importances)[::-1]
plt.figure(figsize=(12,4))
plt.title("Feature importances by DecisionTreeClassifier")
plt.bar(range(len(indices)), importances[indices], color='lightblue', align="center")
plt.xticks(range(len(indices)), feat_names[indices], rotation='vertical',fontsize=14)
plt.xlim([-1, len(indices)])
plt.show()
```
#### Keeping only the relevant attributes.
```
# Select only the features that proved relevant in the importance plot.
X = df[["satisfaction","evaluation","averageMonthlyHours","yearsAtCompany"]]
```
#### Splitting the data sets again.
```
# The scaling and split have to be redone, since 'X' was rebuilt directly from the df.
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
X
,y
,test_size = 0.2
,random_state = 123
,stratify = y
)
X_train
```
#### Baseline model function.
```
# Baseline model: always predicts class 0
def base_rate_model(X) :
y = np.zeros(X.shape[0])
return y
```
#### Importing the evaluation metric methods.
```
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
def accuracy_result(y_test,y_predict):
acc = accuracy_score(y_test, y_predict)
print ("Accuracy = %2.2f" % acc)
def roc_classification_report_results(model,y_test,y_predict):
roc_ = roc_auc_score(y_test, y_predict)
classfication_report = classification_report(y_test, y_predict)
print ("\n{} AUC = {}\n".format(model, roc_))
print(classfication_report)
```
#### Analyzing the baseline model
```
y_predict = base_rate_model(X_test)
accuracy_result(y_test, y_predict)
# Accuracy is not a good metric here, since the dataset is imbalanced. It can mislead us in this case.
# As we can see below, the model never correctly predicts our target class, which is 1.
roc_classification_report_results("Base Model", y_test, y_predict)
```
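Since the comments above point out that the classes are imbalanced, a quick check of the class distribution (a small added sketch using the `turnover` column already in `df`) makes that explicit:
```
# Share of each class: the positive class (turnover = 1) is the minority,
# which is why accuracy alone is misleading here
print(df['turnover'].value_counts(normalize=True))
```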
### Logistic Regression model.
#### Instantiating the algorithm.
```
from sklearn.linear_model import LogisticRegression
logis = LogisticRegression()
```
#### Training the model.
```
logis.fit(X_train, y_train)
```
#### Computing the predictions.
```
y_predict = logis.predict(X_test)
```
#### Evaluating the result.
```
# Training uses the training features and labels; for prediction we pass the test set so the model returns the predicted class.
accuracy_result(y_test, y_predict)
roc_classification_report_results("Logistic Regression", y_test, y_predict)
```
### Decision Tree model.
#### Instantiating the algorithm.
```
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
```
#### Training the model.
```
dtree = dtree.fit(X_train,y_train)
```
#### Computing the predictions.
```
y_predict = dtree.predict(X_test)
```
#### Evaluating the result.
```
accuracy_result(y_test, y_predict)
roc_classification_report_results("Decision Tree", y_test, y_predict)
```
### Random Forest model
#### Instantiating the algorithm.
```
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
```
#### Training the model.
```
rf = rf.fit(X_train,y_train)
```
#### Computing the predictions.
```
y_predict = rf.predict(X_test)
```
#### Evaluating the result.
```
accuracy_result(y_test, y_predict)
roc_classification_report_results("Random Forest", y_test, y_predict)
```
### PyCaret
```
!pip install pycaret
```
#### Importing the methods.
```
from pycaret.classification import *
```
#### Defining the setup.
```
# Tip: run it first, check the inferred data types, then add the type transformations.
s = setup( df[["satisfaction","evaluation","averageMonthlyHours","yearsAtCompany","turnover"]]
,target = "turnover"
,numeric_features = ["yearsAtCompany","averageMonthlyHours"] # force these to numeric, since they were previously read as categorical
,normalize = True
,normalize_method = "minmax"
,data_split_stratify = True
,fix_imbalance = True,
)
```
#### Comparing different models.
```
best = compare_models(fold = 5,sort = 'AUC',)
```
#### Creating the model.
```
gbc = create_model('gbc', fold = 5)
```
#### Tuning the model.
```
tuned_gbc = tune_model(gbc
,fold = 5
,custom_grid = {"learning_rate":[0.1,0.2,0.5]
,"n_estimators":[100,500,1000]
,"min_samples_split":[1,2,5,10]
,"max_depth":[1,3,9]
}
,optimize = 'AUC')
```
In this case the tuning was not worthwhile, since it made the result worse.
#### Finalizing the model.
```
final_model = finalize_model(tuned_gbc)
save_model(final_model,'model')
```
#### Transferring the files to the Data Lake.
#### Classification model.
```
client.fput_object(
"curated",
"model.pkl",
"model.pkl"
)
```
#### Exporting the dataset to disk.
```
df.to_csv("dataset.csv",index=False)
client.fput_object(
"curated",
"dataset.csv",
"dataset.csv"
)
```
| github_jupyter |
## Set up the dependencies
```
# for reading and validating data
import emeval.input.spec_details as eisd
import emeval.input.phone_view as eipv
import emeval.input.eval_view as eiev
import arrow
# Visualization helpers
import emeval.viz.phone_view as ezpv
import emeval.viz.eval_view as ezev
# For plots
import matplotlib.pyplot as plt
%matplotlib inline
# For maps
import folium
import branca.element as bre
# For easier debugging while working on modules
import importlib
import pandas as pd
import numpy as np
```
## The spec
The spec defines what experiments were done, and over which time ranges. Once the experiment is complete, most of the structure is read back from the data, but we use the spec to validate that it all worked correctly. The spec also contains the ground truth for the legs. Here, we read the spec for the trip to UC Berkeley.
```
DATASTORE_LOC = "bin/data/"
AUTHOR_EMAIL = "[email protected]"
sd_la = eisd.FileSpecDetails(DATASTORE_LOC, AUTHOR_EMAIL, "unimodal_trip_car_bike_mtv_la")
sd_sj = eisd.FileSpecDetails(DATASTORE_LOC, AUTHOR_EMAIL, "car_scooter_brex_san_jose")
sd_ucb = eisd.FileSpecDetails(DATASTORE_LOC, AUTHOR_EMAIL, "train_bus_ebike_mtv_ucb")
```
## Loading the data into a dataframe
```
pv_la = eipv.PhoneView(sd_la)
pv_sj = eipv.PhoneView(sd_sj)
sd_sj.CURR_SPEC_ID
ios_loc_entries = sd_sj.retrieve_data("ucb-sdb-ios-1", ["background/location"],
arrow.get("2019-08-07T14:50:57.445000-07:00").timestamp,
arrow.get("2019-08-07T15:00:16.787000-07:00").timestamp)
ios_location_df = pd.DataFrame([e["data"] for e in ios_loc_entries])
android_loc_entries = sd_sj.retrieve_data("ucb-sdb-android-1", ["background/location"],
arrow.get("2019-08-07T14:50:57.445000-07:00").timestamp,
arrow.get("2019-08-07T15:00:16.787000-07:00").timestamp)
android_location_df = pd.DataFrame([e["data"] for e in android_loc_entries])
android_location_df[["fmt_time"]].loc[30:60]
ios_map = ezpv.display_map_detail_from_df(ios_location_df.loc[20:35])
android_map = ezpv.display_map_detail_from_df(android_location_df.loc[25:50])
fig = bre.Figure()
fig.add_subplot(1, 2, 1).add_child(ios_map)
fig.add_subplot(1, 2, 2).add_child(android_map)
pv_ucb = eipv.PhoneView(sd_ucb)
import pandas as pd
def get_battery_drain_entries(pv):
battery_entry_list = []
for phone_os, phone_map in pv.map().items():
print(15 * "=*")
print(phone_os, phone_map.keys())
for phone_label, phone_detail_map in phone_map.items():
print(4 * ' ', 15 * "-*")
print(4 * ' ', phone_label, phone_detail_map.keys())
# this spec does not have any calibration ranges, but evaluation ranges are actually cooler
for r in phone_detail_map["evaluation_ranges"]:
print(8 * ' ', 30 * "=")
print(8 * ' ',r.keys())
print(8 * ' ',r["trip_id"], r["eval_common_trip_id"], r["eval_role"], len(r["evaluation_trip_ranges"]))
bcs = r["battery_df"]["battery_level_pct"]
delta_battery = bcs.iloc[0] - bcs.iloc[-1]
print("Battery starts at %d, ends at %d, drain = %d" % (bcs.iloc[0], bcs.iloc[-1], delta_battery))
battery_entry = {"phone_os": phone_os, "phone_label": phone_label, "timeline": pv.spec_details.curr_spec["id"],
"run": r["trip_run"], "duration": r["duration"],
"role": r["eval_role_base"], "battery_drain": delta_battery}
battery_entry_list.append(battery_entry)
return battery_entry_list
# We are not going to look at battery life at the evaluation trip level; we will end with evaluation range
# since we want to capture the overall drain for the timeline
battery_entries_list = []
battery_entries_list.extend(get_battery_drain_entries(pv_la))
battery_entries_list.extend(get_battery_drain_entries(pv_sj))
battery_entries_list.extend(get_battery_drain_entries(pv_ucb))
battery_drain_df = pd.DataFrame(battery_entries_list)
battery_drain_df.head()
r2q_map = {"power_control": 0, "HAMFDC": 1, "MAHFDC": 2, "HAHFDC": 3, "accuracy_control": 4}
# right now, only the san jose data has the full comparison
q2r_complete_list = ["power", "HAMFDC", "MAHFDC", "HAHFDC", "accuracy"]
# others only have android or ios
q2r_android_list = ["power", "HAMFDC", "HAHFDC", "accuracy"]
q2r_ios_list = ["power", "MAHFDC", "HAHFDC", "accuracy"]
# Make a number so that we can get the plots to come out in order
battery_drain_df["quality"] = battery_drain_df.role.apply(lambda r: r2q_map[r])
battery_drain_df.query("role == 'MAHFDC'").head()
```
## Displaying various groupings using boxplots
```
ifig, ax_array = plt.subplots(nrows=2,ncols=3,figsize=(12,6), sharex=False, sharey=True)
timeline_list = ["train_bus_ebike_mtv_ucb", "car_scooter_brex_san_jose", "unimodal_trip_car_bike_mtv_la"]
for i, tl in enumerate(timeline_list):
battery_drain_df.query("timeline == @tl & phone_os == 'android'").boxplot(ax = ax_array[0][i], column=["battery_drain"], by=["quality"], showbox=False, whis="range")
ax_array[0][i].set_title(tl)
battery_drain_df.query("timeline == @tl & phone_os == 'ios'").boxplot(ax = ax_array[1][i], column=["battery_drain"], by=["quality"], showbox=False, whis="range")
ax_array[1][i].set_title("")
for i, ax in enumerate(ax_array[0]):
if i == 1:
ax.set_xticklabels(q2r_complete_list)
else:
ax.set_xticklabels(q2r_android_list)
ax.set_xlabel("")
for i, ax in enumerate(ax_array[1]):
if i == 1:
ax.set_xticklabels(q2r_complete_list)
else:
ax.set_xticklabels(q2r_ios_list)
ax.set_xlabel("")
ax_array[0][0].set_ylabel("Battery drain (android)")
ax_array[1][0].set_ylabel("Battery drain (iOS)")
ifig.suptitle("Power v/s quality over multiple timelines")
# ifig.tight_layout()
battery_drain_df.query("quality == 1 & phone_os == 'ios' & timeline == 'car_scooter_brex_san_jose'").iloc[1:].describe()
battery_drain_df.query("quality == 0 & phone_os == 'ios' & timeline == 'car_scooter_brex_san_jose'").iloc[1:].describe()
battery_drain_df.query("quality == 2 & phone_os == 'ios' & timeline == 'car_scooter_brex_san_jose'").iloc[1:].describe()
```
| github_jupyter |